repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/packfile/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/packfile/common.go
package packfile import ( "io" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) var signature = []byte{'P', 'A', 'C', 'K'} const ( // VersionSupported is the packfile version supported by this package VersionSupported uint32 = 2 firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length maskFirstLength = 15 // 0000 1111 maskContinue = 0x80 // 1000 0000 maskLength = uint8(127) // 0111 1111 maskType = uint8(112) // 0111 0000 ) // UpdateObjectStorage updates the storer with the objects in the given // packfile. func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { if pw, ok := s.(storer.PackfileWriter); ok { return WritePackfileToObjectStorage(pw, packfile) } p, err := NewParserWithStorage(NewScanner(packfile), s) if err != nil { return err } _, err = p.Parse() return err } // WritePackfileToObjectStorage writes all the packfile objects into the given // object storage. func WritePackfileToObjectStorage( sw storer.PackfileWriter, packfile io.Reader, ) (err error) { w, err := sw.PackfileWriter() if err != nil { return err } defer ioutil.CheckClose(w, &err) var n int64 n, err = io.Copy(w, packfile) if err == nil && n == 0 { return ErrEmptyPackfile } return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/option.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/option.go
package config import ( "fmt" "strings" ) // Option defines a key/value entity in a config file. type Option struct { // Key preserving original caseness. // Use IsKey instead to compare key regardless of caseness. Key string // Original value as string, could be not normalized. Value string } type Options []*Option // IsKey returns true if the given key matches // this option's key in a case-insensitive comparison. func (o *Option) IsKey(key string) bool { return strings.EqualFold(o.Key, key) } func (opts Options) GoString() string { var strs []string for _, opt := range opts { strs = append(strs, fmt.Sprintf("%#v", opt)) } return strings.Join(strs, ", ") } // Get gets the value for the given key if set, // otherwise it returns the empty string. // // Note that there is no difference // // This matches git behaviour since git v1.8.1-rc1, // if there are multiple definitions of a key, the // last one wins. // // See: http://article.gmane.org/gmane.linux.kernel/1407184 // // In order to get all possible values for the same key, // use GetAll. func (opts Options) Get(key string) string { for i := len(opts) - 1; i >= 0; i-- { o := opts[i] if o.IsKey(key) { return o.Value } } return "" } // Has checks if an Option exist with the given key. func (opts Options) Has(key string) bool { for _, o := range opts { if o.IsKey(key) { return true } } return false } // GetAll returns all possible values for the same key. 
func (opts Options) GetAll(key string) []string { result := []string{} for _, o := range opts { if o.IsKey(key) { result = append(result, o.Value) } } return result } func (opts Options) withoutOption(key string) Options { result := Options{} for _, o := range opts { if !o.IsKey(key) { result = append(result, o) } } return result } func (opts Options) withAddedOption(key string, value string) Options { return append(opts, &Option{key, value}) } func (opts Options) withSettedOption(key string, values ...string) Options { var result Options var added []string for _, o := range opts { if !o.IsKey(key) { result = append(result, o) continue } if contains(values, o.Value) { added = append(added, o.Value) result = append(result, o) continue } } for _, value := range values { if contains(added, value) { continue } result = result.withAddedOption(key, value) } return result } func contains(haystack []string, needle string) bool { for _, s := range haystack { if s == needle { return true } } return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/decoder.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/decoder.go
package config import ( "io" "github.com/go-git/gcfg" ) // A Decoder reads and decodes config files from an input stream. type Decoder struct { io.Reader } // NewDecoder returns a new decoder that reads from r. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r} } // Decode reads the whole config from its input and stores it in the // value pointed to by config. func (d *Decoder) Decode(config *Config) error { cb := func(s string, ss string, k string, v string, bv bool) error { if ss == "" && k == "" { config.Section(s) return nil } if ss != "" && k == "" { config.Section(s).Subsection(ss) return nil } config.AddOption(s, ss, k, v) return nil } return gcfg.ReadWithCallback(d, cb) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/format.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/format.go
package config // RepositoryFormatVersion represents the repository format version, // as per defined at: // // https://git-scm.com/docs/repository-version type RepositoryFormatVersion string const ( // Version_0 is the format defined by the initial version of git, // including but not limited to the format of the repository // directory, the repository configuration file, and the object // and ref storage. // // Specifying the complete behavior of git is beyond the scope // of this document. Version_0 = "0" // Version_1 is identical to version 0, with the following exceptions: // // 1. When reading the core.repositoryformatversion variable, a git // implementation which supports version 1 MUST also read any // configuration keys found in the extensions section of the // configuration file. // // 2. If a version-1 repository specifies any extensions.* keys that // the running git has not implemented, the operation MUST NOT proceed. // Similarly, if the value of any known key is not understood by the // implementation, the operation MUST NOT proceed. // // Note that if no extensions are specified in the config file, then // core.repositoryformatversion SHOULD be set to 0 (setting it to 1 provides // no benefit, and makes the repository incompatible with older // implementations of git). Version_1 = "1" // DefaultRepositoryFormatVersion holds the default repository format version. DefaultRepositoryFormatVersion = Version_0 ) // ObjectFormat defines the object format. type ObjectFormat string const ( // SHA1 represents the object format used for SHA1. SHA1 ObjectFormat = "sha1" // SHA256 represents the object format used for SHA256. SHA256 ObjectFormat = "sha256" // DefaultObjectFormat holds the default object format. DefaultObjectFormat = SHA1 )
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/section.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/section.go
package config import ( "fmt" "strings" ) // Section is the representation of a section inside git configuration files. // Each Section contains Options that are used by both the Git plumbing // and the porcelains. // Sections can be further divided into subsections. To begin a subsection // put its name in double quotes, separated by space from the section name, // in the section header, like in the example below: // // [section "subsection"] // // All the other lines (and the remainder of the line after the section header) // are recognized as option variables, in the form "name = value" (or just name, // which is a short-hand to say that the variable is the boolean "true"). // The variable names are case-insensitive, allow only alphanumeric characters // and -, and must start with an alphabetic character: // // [section "subsection1"] // option1 = value1 // option2 // [section "subsection2"] // option3 = value2 // type Section struct { Name string Options Options Subsections Subsections } type Subsection struct { Name string Options Options } type Sections []*Section func (s Sections) GoString() string { var strs []string for _, ss := range s { strs = append(strs, fmt.Sprintf("%#v", ss)) } return strings.Join(strs, ", ") } type Subsections []*Subsection func (s Subsections) GoString() string { var strs []string for _, ss := range s { strs = append(strs, fmt.Sprintf("%#v", ss)) } return strings.Join(strs, ", ") } // IsName checks if the name provided is equals to the Section name, case insensitive. func (s *Section) IsName(name string) bool { return strings.EqualFold(s.Name, name) } // Subsection returns a Subsection from the specified Section. If the // Subsection does not exists, new one is created and added to Section. 
func (s *Section) Subsection(name string) *Subsection { for i := len(s.Subsections) - 1; i >= 0; i-- { ss := s.Subsections[i] if ss.IsName(name) { return ss } } ss := &Subsection{Name: name} s.Subsections = append(s.Subsections, ss) return ss } // HasSubsection checks if the Section has a Subsection with the specified name. func (s *Section) HasSubsection(name string) bool { for _, ss := range s.Subsections { if ss.IsName(name) { return true } } return false } // RemoveSubsection removes a subsection from a Section. func (s *Section) RemoveSubsection(name string) *Section { result := Subsections{} for _, s := range s.Subsections { if !s.IsName(name) { result = append(result, s) } } s.Subsections = result return s } // Option returns the value for the specified key. Empty string is returned if // key does not exists. func (s *Section) Option(key string) string { return s.Options.Get(key) } // OptionAll returns all possible values for an option with the specified key. // If the option does not exists, an empty slice will be returned. func (s *Section) OptionAll(key string) []string { return s.Options.GetAll(key) } // HasOption checks if the Section has an Option with the given key. func (s *Section) HasOption(key string) bool { return s.Options.Has(key) } // AddOption adds a new Option to the Section. The updated Section is returned. func (s *Section) AddOption(key string, value string) *Section { s.Options = s.Options.withAddedOption(key, value) return s } // SetOption adds a new Option to the Section. If the option already exists, is replaced. // The updated Section is returned. func (s *Section) SetOption(key string, value string) *Section { s.Options = s.Options.withSettedOption(key, value) return s } // Remove an option with the specified key. The updated Section is returned. func (s *Section) RemoveOption(key string) *Section { s.Options = s.Options.withoutOption(key) return s } // IsName checks if the name of the subsection is exactly the specified name. 
func (s *Subsection) IsName(name string) bool { return s.Name == name } // Option returns an option with the specified key. If the option does not exists, // empty spring will be returned. func (s *Subsection) Option(key string) string { return s.Options.Get(key) } // OptionAll returns all possible values for an option with the specified key. // If the option does not exists, an empty slice will be returned. func (s *Subsection) OptionAll(key string) []string { return s.Options.GetAll(key) } // HasOption checks if the Subsection has an Option with the given key. func (s *Subsection) HasOption(key string) bool { return s.Options.Has(key) } // AddOption adds a new Option to the Subsection. The updated Subsection is returned. func (s *Subsection) AddOption(key string, value string) *Subsection { s.Options = s.Options.withAddedOption(key, value) return s } // SetOption adds a new Option to the Subsection. If the option already exists, is replaced. // The updated Subsection is returned. func (s *Subsection) SetOption(key string, value ...string) *Subsection { s.Options = s.Options.withSettedOption(key, value...) return s } // RemoveOption removes the option with the specified key. The updated Subsection is returned. func (s *Subsection) RemoveOption(key string) *Subsection { s.Options = s.Options.withoutOption(key) return s }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/doc.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/doc.go
// Package config implements encoding and decoding of git config files. // // Configuration File // ------------------ // // The Git configuration file contains a number of variables that affect // the Git commands' behavior. The `.git/config` file in each repository // is used to store the configuration for that repository, and // `$HOME/.gitconfig` is used to store a per-user configuration as // fallback values for the `.git/config` file. The file `/etc/gitconfig` // can be used to store a system-wide default configuration. // // The configuration variables are used by both the Git plumbing // and the porcelains. The variables are divided into sections, wherein // the fully qualified variable name of the variable itself is the last // dot-separated segment and the section name is everything before the last // dot. The variable names are case-insensitive, allow only alphanumeric // characters and `-`, and must start with an alphabetic character. Some // variables may appear multiple times; we say then that the variable is // multivalued. // // Syntax // ~~~~~~ // // The syntax is fairly flexible and permissive; whitespaces are mostly // ignored. The '#' and ';' characters begin comments to the end of line, // blank lines are ignored. // // The file consists of sections and variables. A section begins with // the name of the section in square brackets and continues until the next // section begins. Section names are case-insensitive. Only alphanumeric // characters, `-` and `.` are allowed in section names. Each variable // must belong to some section, which means that there must be a section // header before the first setting of a variable. // // Sections can be further divided into subsections. 
To begin a subsection // put its name in double quotes, separated by space from the section name, // in the section header, like in the example below: // // -------- // [section "subsection"] // // -------- // // Subsection names are case sensitive and can contain any characters except // newline (doublequote `"` and backslash can be included by escaping them // as `\"` and `\\`, respectively). Section headers cannot span multiple // lines. Variables may belong directly to a section or to a given subsection. // You can have `[section]` if you have `[section "subsection"]`, but you // don't need to. // // There is also a deprecated `[section.subsection]` syntax. With this // syntax, the subsection name is converted to lower-case and is also // compared case sensitively. These subsection names follow the same // restrictions as section names. // // All the other lines (and the remainder of the line after the section // header) are recognized as setting variables, in the form // 'name = value' (or just 'name', which is a short-hand to say that // the variable is the boolean "true"). // The variable names are case-insensitive, allow only alphanumeric characters // and `-`, and must start with an alphabetic character. // // A line that defines a value can be continued to the next line by // ending it with a `\`; the backquote and the end-of-line are // stripped. Leading whitespaces after 'name =', the remainder of the // line after the first comment character '#' or ';', and trailing // whitespaces of the line are discarded unless they are enclosed in // double quotes. Internal whitespaces within the value are retained // verbatim. // // Inside double quotes, double quote `"` and backslash `\` characters // must be escaped: use `\"` for `"` and `\\` for `\`. // // The following escape sequences (beside `\"` and `\\`) are recognized: // `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) // and `\b` for backspace (BS). 
Other char escape sequences (including octal // escape sequences) are invalid. // // Includes // ~~~~~~~~ // // You can include one config file from another by setting the special // `include.path` variable to the name of the file to be included. The // variable takes a pathname as its value, and is subject to tilde // expansion. // // The included file is expanded immediately, as if its contents had been // found at the location of the include directive. If the value of the // `include.path` variable is a relative path, the path is considered to be // relative to the configuration file in which the include directive was // found. See below for examples. // // // Example // ~~~~~~~ // // # Core variables // [core] // ; Don't trust file modes // filemode = false // // # Our diff algorithm // [diff] // external = /usr/local/bin/diff-wrapper // renames = true // // [branch "devel"] // remote = origin // merge = refs/heads/devel // // # Proxy settings // [core] // gitProxy="ssh" for "kernel.org" // gitProxy=default-proxy ; for the rest // // [include] // path = /path/to/foo.inc ; include by absolute path // path = foo ; expand "foo" relative to the current file // path = ~/foo ; expand "foo" in your `$HOME` directory // package config
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/encoder.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/encoder.go
package config import ( "fmt" "io" "strings" ) // An Encoder writes config files to an output stream. type Encoder struct { w io.Writer } var ( subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`) ) // NewEncoder returns a new encoder that writes to w. func NewEncoder(w io.Writer) *Encoder { return &Encoder{w} } // Encode writes the config in git config format to the stream of the encoder. func (e *Encoder) Encode(cfg *Config) error { for _, s := range cfg.Sections { if err := e.encodeSection(s); err != nil { return err } } return nil } func (e *Encoder) encodeSection(s *Section) error { if len(s.Options) > 0 { if err := e.printf("[%s]\n", s.Name); err != nil { return err } if err := e.encodeOptions(s.Options); err != nil { return err } } for _, ss := range s.Subsections { if err := e.encodeSubsection(s.Name, ss); err != nil { return err } } return nil } func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil { return err } return e.encodeOptions(s.Options) } func (e *Encoder) encodeOptions(opts Options) error { for _, o := range opts { var value string if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") { value = `"`+valueReplacer.Replace(o.Value)+`"` } else { value = o.Value } if err := e.printf("\t%s = %s\n", o.Key, value); err != nil { return err } } return nil } func (e *Encoder) printf(msg string, args ...interface{}) error { _, err := fmt.Fprintf(e.w, msg, args...) return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/format/config/common.go
package config // New creates a new config instance. func New() *Config { return &Config{} } // Config contains all the sections, comments and includes from a config file. type Config struct { Comment *Comment Sections Sections Includes Includes } // Includes is a list of Includes in a config file. type Includes []*Include // Include is a reference to an included config file. type Include struct { Path string Config *Config } // Comment string without the prefix '#' or ';'. type Comment string const ( // NoSubsection token is passed to Config.Section and Config.SetSection to // represent the absence of a section. NoSubsection = "" ) // Section returns a existing section with the given name or creates a new one. func (c *Config) Section(name string) *Section { for i := len(c.Sections) - 1; i >= 0; i-- { s := c.Sections[i] if s.IsName(name) { return s } } s := &Section{Name: name} c.Sections = append(c.Sections, s) return s } // HasSection checks if the Config has a section with the specified name. func (c *Config) HasSection(name string) bool { for _, s := range c.Sections { if s.IsName(name) { return true } } return false } // RemoveSection removes a section from a config file. func (c *Config) RemoveSection(name string) *Config { result := Sections{} for _, s := range c.Sections { if !s.IsName(name) { result = append(result, s) } } c.Sections = result return c } // RemoveSubsection remove a subsection from a config file. func (c *Config) RemoveSubsection(section string, subsection string) *Config { for _, s := range c.Sections { if s.IsName(section) { result := Subsections{} for _, ss := range s.Subsections { if !ss.IsName(subsection) { result = append(result, ss) } } s.Subsections = result } } return c } // AddOption adds an option to a given section and subsection. Use the // NoSubsection constant for the subsection argument if no subsection is wanted. 
func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { if subsection == "" { c.Section(section).AddOption(key, value) } else { c.Section(section).Subsection(subsection).AddOption(key, value) } return c } // SetOption sets an option to a given section and subsection. Use the // NoSubsection constant for the subsection argument if no subsection is wanted. func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { if subsection == "" { c.Section(section).SetOption(key, value) } else { c.Section(section).Subsection(subsection).SetOption(key, value) } return c }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/common.go
// Package transport includes the implementation for different transport // protocols. // // `Client` can be used to fetch and send packfiles to a git server. // The `client` package provides higher level functions to instantiate the // appropriate `Client` based on the repository URL. // // go-git supports HTTP and SSH (see `Protocols`), but you can also install // your own protocols (see the `client` package). // // Each protocol has its own implementation of `Client`, but you should // generally not use them directly, use `client.NewClient` instead. package transport import ( "bytes" "context" "errors" "fmt" "io" "net/url" "path/filepath" "strconv" "strings" giturl "github.com/jesseduffield/go-git/v5/internal/url" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" ) var ( ErrRepositoryNotFound = errors.New("repository not found") ErrEmptyRemoteRepository = errors.New("remote repository is empty") ErrAuthenticationRequired = errors.New("authentication required") ErrAuthorizationFailed = errors.New("authorization failed") ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given") ErrInvalidAuthMethod = errors.New("invalid auth method") ErrAlreadyConnected = errors.New("session already established") ) const ( UploadPackServiceName = "git-upload-pack" ReceivePackServiceName = "git-receive-pack" ) // Transport can initiate git-upload-pack and git-receive-pack processes. // It is implemented both by the client and the server, making this a RPC. type Transport interface { // NewUploadPackSession starts a git-upload-pack session for an endpoint. NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error) // NewReceivePackSession starts a git-receive-pack session for an endpoint. 
NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error) } type Session interface { // AdvertisedReferences retrieves the advertised references for a // repository. // If the repository does not exist, returns ErrRepositoryNotFound. // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. AdvertisedReferences() (*packp.AdvRefs, error) // AdvertisedReferencesContext retrieves the advertised references for a // repository. // If the repository does not exist, returns ErrRepositoryNotFound. // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error) io.Closer } type AuthMethod interface { fmt.Stringer Name() string } // UploadPackSession represents a git-upload-pack session. // A git-upload-pack session has two steps: reference discovery // (AdvertisedReferences) and uploading pack (UploadPack). type UploadPackSession interface { Session // UploadPack takes a git-upload-pack request and returns a response, // including a packfile. Don't be confused by terminology, the client // side of a git-upload-pack is called git-fetch-pack, although here // the same interface is used to make it RPC-like. UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error) } // ReceivePackSession represents a git-receive-pack session. // A git-receive-pack session has two steps: reference discovery // (AdvertisedReferences) and receiving pack (ReceivePack). // In that order. type ReceivePackSession interface { Session // ReceivePack sends an update references request and a packfile // reader and returns a ReportStatus and error. Don't be confused by // terminology, the client side of a git-receive-pack is called // git-send-pack, although here the same interface is used to make it // RPC-like. ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) } // Endpoint represents a Git URL in any supported protocol. 
type Endpoint struct { // Protocol is the protocol of the endpoint (e.g. git, https, file). Protocol string // User is the user. User string // Password is the password. Password string // Host is the host. Host string // Port is the port to connect, if 0 the default port for the given protocol // will be used. Port int // Path is the repository path. Path string // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CaBundle specify additional ca bundle with system cert pool CaBundle []byte // Proxy provides info required for connecting to a proxy. Proxy ProxyOptions } type ProxyOptions struct { URL string Username string Password string } func (o *ProxyOptions) Validate() error { if o.URL != "" { _, err := url.Parse(o.URL) return err } return nil } func (o *ProxyOptions) FullURL() (*url.URL, error) { proxyURL, err := url.Parse(o.URL) if err != nil { return nil, err } if o.Username != "" { if o.Password != "" { proxyURL.User = url.UserPassword(o.Username, o.Password) } else { proxyURL.User = url.User(o.Username) } } return proxyURL, nil } var defaultPorts = map[string]int{ "http": 80, "https": 443, "git": 9418, "ssh": 22, } // String returns a string representation of the Git URL. 
func (u *Endpoint) String() string { var buf bytes.Buffer if u.Protocol != "" { buf.WriteString(u.Protocol) buf.WriteByte(':') } if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" { buf.WriteString("//") if u.User != "" || u.Password != "" { buf.WriteString(url.PathEscape(u.User)) if u.Password != "" { buf.WriteByte(':') buf.WriteString(url.PathEscape(u.Password)) } buf.WriteByte('@') } if u.Host != "" { buf.WriteString(u.Host) if u.Port != 0 { port, ok := defaultPorts[strings.ToLower(u.Protocol)] if !ok || ok && port != u.Port { fmt.Fprintf(&buf, ":%d", u.Port) } } } } if u.Path != "" && u.Path[0] != '/' && u.Host != "" { buf.WriteByte('/') } buf.WriteString(u.Path) return buf.String() } func NewEndpoint(endpoint string) (*Endpoint, error) { if e, ok := parseSCPLike(endpoint); ok { return e, nil } if e, ok := parseFile(endpoint); ok { return e, nil } return parseURL(endpoint) } func parseURL(endpoint string) (*Endpoint, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } if !u.IsAbs() { return nil, plumbing.NewPermanentError(fmt.Errorf( "invalid endpoint: %s", endpoint, )) } var user, pass string if u.User != nil { user = u.User.Username() pass, _ = u.User.Password() } host := u.Hostname() if strings.Contains(host, ":") { // IPv6 address host = "[" + host + "]" } return &Endpoint{ Protocol: u.Scheme, User: user, Password: pass, Host: host, Port: getPort(u), Path: getPath(u), }, nil } func getPort(u *url.URL) int { p := u.Port() if p == "" { return 0 } i, err := strconv.Atoi(p) if err != nil { return 0 } return i } func getPath(u *url.URL) string { var res string = u.Path if u.RawQuery != "" { res += "?" 
+ u.RawQuery } if u.Fragment != "" { res += "#" + u.Fragment } return res } func parseSCPLike(endpoint string) (*Endpoint, bool) { if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) { return nil, false } user, host, portStr, path := giturl.FindScpLikeComponents(endpoint) port, err := strconv.Atoi(portStr) if err != nil { port = 22 } return &Endpoint{ Protocol: "ssh", User: user, Host: host, Port: port, Path: path, }, true } func parseFile(endpoint string) (*Endpoint, bool) { if giturl.MatchesScheme(endpoint) { return nil, false } path, err := filepath.Abs(endpoint) if err != nil { return nil, false } return &Endpoint{ Protocol: "file", Path: path, }, true } // UnsupportedCapabilities are the capabilities not supported by any client // implementation var UnsupportedCapabilities = []capability.Capability{ capability.MultiACK, capability.MultiACKDetailed, capability.ThinPack, } // FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities // from a capability.List, the intended usage is on the client implementation // to filter the capabilities from an AdvRefs message. func FilterUnsupportedCapabilities(list *capability.List) { for _, c := range UnsupportedCapabilities { list.Delete(c) } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/file/client.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/file/client.go
// Package file implements the file transport protocol. package file import ( "bufio" "errors" "io" "os" "path/filepath" "runtime" "strings" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common" "golang.org/x/sys/execabs" ) // DefaultClient is the default local client. var DefaultClient = NewClient( transport.UploadPackServiceName, transport.ReceivePackServiceName, ) type runner struct { UploadPackBin string ReceivePackBin string } // NewClient returns a new local client using the given git-upload-pack and // git-receive-pack binaries. func NewClient(uploadPackBin, receivePackBin string) transport.Transport { return common.NewClient(&runner{ UploadPackBin: uploadPackBin, ReceivePackBin: receivePackBin, }) } func prefixExecPath(cmd string) (string, error) { // Use `git --exec-path` to find the exec path. execCmd := execabs.Command("git", "--exec-path") stdout, err := execCmd.StdoutPipe() if err != nil { return "", err } stdoutBuf := bufio.NewReader(stdout) err = execCmd.Start() if err != nil { return "", err } execPathBytes, isPrefix, err := stdoutBuf.ReadLine() if err != nil { return "", err } if isPrefix { return "", errors.New("couldn't read exec-path line all at once") } err = execCmd.Wait() if err != nil { return "", err } execPath := string(execPathBytes) execPath = strings.TrimSpace(execPath) cmd = filepath.Join(execPath, cmd) // Make sure it actually exists. 
_, err = execabs.LookPath(cmd) if err != nil { return "", err } return cmd, nil } func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod, ) (common.Command, error) { switch cmd { case transport.UploadPackServiceName: cmd = r.UploadPackBin case transport.ReceivePackServiceName: cmd = r.ReceivePackBin } _, err := execabs.LookPath(cmd) if err != nil { if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound { cmd, err = prefixExecPath(cmd) if err != nil { return nil, err } } else { return nil, err } } return &command{cmd: execabs.Command(cmd, adjustPathForWindows(ep.Path))}, nil } func isDriveLetter(c byte) bool { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') } // On Windows, the path that results from a file: URL has a leading slash. This // has to be removed if there's a drive letter func adjustPathForWindows(p string) string { if runtime.GOOS != "windows" { return p } if len(p) >= 3 && p[0] == '/' && isDriveLetter(p[1]) && p[2] == ':' { return p[1:] } return p } type command struct { cmd *execabs.Cmd stderrCloser io.Closer closed bool } func (c *command) Start() error { return c.cmd.Start() } func (c *command) StderrPipe() (io.Reader, error) { // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait. // We use an io.Pipe and close it after the command finishes. r, w := io.Pipe() c.cmd.Stderr = w c.stderrCloser = r return r, nil } func (c *command) StdinPipe() (io.WriteCloser, error) { return c.cmd.StdinPipe() } func (c *command) StdoutPipe() (io.Reader, error) { return c.cmd.StdoutPipe() } func (c *command) Kill() error { c.cmd.Process.Kill() return c.Close() } // Close waits for the command to exit. func (c *command) Close() error { if c.closed { return nil } defer func() { c.closed = true _ = c.stderrCloser.Close() }() err := c.cmd.Wait() if _, ok := err.(*os.PathError); ok { return nil } // When a repository does not exist, the command exits with code 128. 
if _, ok := err.(*execabs.ExitError); ok { return nil } return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/file/server.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/file/server.go
package file import ( "fmt" "os" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common" "github.com/jesseduffield/go-git/v5/plumbing/transport/server" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) // ServeUploadPack serves a git-upload-pack request using standard output, input // and error. This is meant to be used when implementing a git-upload-pack // command. func ServeUploadPack(path string) error { ep, err := transport.NewEndpoint(path) if err != nil { return err } // TODO: define and implement a server-side AuthMethod s, err := server.DefaultServer.NewUploadPackSession(ep, nil) if err != nil { return fmt.Errorf("error creating session: %s", err) } return common.ServeUploadPack(srvCmd, s) } // ServeReceivePack serves a git-receive-pack request using standard output, // input and error. This is meant to be used when implementing a // git-receive-pack command. func ServeReceivePack(path string) error { ep, err := transport.NewEndpoint(path) if err != nil { return err } // TODO: define and implement a server-side AuthMethod s, err := server.DefaultServer.NewReceivePackSession(ep, nil) if err != nil { return fmt.Errorf("error creating session: %s", err) } return common.ServeReceivePack(srvCmd, s) } var srvCmd = common.ServerCommand{ Stdin: os.Stdin, Stdout: ioutil.WriteNopCloser(os.Stdout), Stderr: os.Stderr, }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/ssh/auth_method.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/ssh/auth_method.go
package ssh import ( "errors" "fmt" "os" "os/user" "path/filepath" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/skeema/knownhosts" sshagent "github.com/xanzy/ssh-agent" "golang.org/x/crypto/ssh" ) const DefaultUsername = "git" // AuthMethod is the interface all auth methods for the ssh client // must implement. The clientConfig method returns the ssh client // configuration needed to establish an ssh connection. type AuthMethod interface { transport.AuthMethod // ClientConfig should return a valid ssh.ClientConfig to be used to create // a connection to the SSH server. ClientConfig() (*ssh.ClientConfig, error) } // The names of the AuthMethod implementations. To be returned by the // Name() method. Most git servers only allow PublicKeysName and // PublicKeysCallbackName. const ( KeyboardInteractiveName = "ssh-keyboard-interactive" PasswordName = "ssh-password" PasswordCallbackName = "ssh-password-callback" PublicKeysName = "ssh-public-keys" PublicKeysCallbackName = "ssh-public-key-callback" ) // KeyboardInteractive implements AuthMethod by using a // prompt/response sequence controlled by the server. type KeyboardInteractive struct { User string Challenge ssh.KeyboardInteractiveChallenge HostKeyCallbackHelper } func (a *KeyboardInteractive) Name() string { return KeyboardInteractiveName } func (a *KeyboardInteractive) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ a.Challenge, }, }) } // Password implements AuthMethod by using the given password. 
type Password struct { User string Password string HostKeyCallbackHelper } func (a *Password) Name() string { return PasswordName } func (a *Password) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *Password) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.Password(a.Password)}, }) } // PasswordCallback implements AuthMethod by using a callback // to fetch the password. type PasswordCallback struct { User string Callback func() (pass string, err error) HostKeyCallbackHelper } func (a *PasswordCallback) Name() string { return PasswordCallbackName } func (a *PasswordCallback) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, }) } // PublicKeys implements AuthMethod by using the given key pairs. type PublicKeys struct { User string Signer ssh.Signer HostKeyCallbackHelper } // NewPublicKeys returns a PublicKeys from a PEM encoded private key. An // encryption password should be given if the pemBytes contains a password // encrypted PEM block otherwise password should be empty. It supports RSA // (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { signer, err := ssh.ParsePrivateKey(pemBytes) if _, ok := err.(*ssh.PassphraseMissingError); ok { signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password)) } if err != nil { return nil, err } return &PublicKeys{User: user, Signer: signer}, nil } // NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM // encoded private key. An encryption password should be given if the pemBytes // contains a password encrypted PEM block otherwise password should be empty. 
func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { bytes, err := os.ReadFile(pemFile) if err != nil { return nil, err } return NewPublicKeys(user, bytes, password) } func (a *PublicKeys) Name() string { return PublicKeysName } func (a *PublicKeys) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, }) } func username() (string, error) { var username string if user, err := user.Current(); err == nil { username = user.Username } else { username = os.Getenv("USER") } if username == "" { return "", errors.New("failed to get username") } return username, nil } // PublicKeysCallback implements AuthMethod by asking a // ssh.agent.Agent to act as a signer. type PublicKeysCallback struct { User string Callback func() (signers []ssh.Signer, err error) HostKeyCallbackHelper } // NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens // a pipe with the SSH agent and uses the pipe as the implementer of the public // key callback function. func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) { var err error if u == "" { u, err = username() if err != nil { return nil, err } } a, _, err := sshagent.New() if err != nil { return nil, fmt.Errorf("error creating SSH agent: %q", err) } return &PublicKeysCallback{ User: u, Callback: a.Signers, }, nil } func (a *PublicKeysCallback) Name() string { return PublicKeysCallbackName } func (a *PublicKeysCallback) String() string { return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) } func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { return a.SetHostKeyCallback(&ssh.ClientConfig{ User: a.User, Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, }) } // NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a // known_hosts file. 
http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT // // If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS // environment variable, example: // // /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file // // If SSH_KNOWN_HOSTS is not set the following file locations will be used: // // ~/.ssh/known_hosts // /etc/ssh/ssh_known_hosts func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { kh, err := newKnownHosts(files...) return ssh.HostKeyCallback(kh), err } func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) { var err error if len(files) == 0 { if files, err = getDefaultKnownHostsFiles(); err != nil { return nil, err } } if files, err = filterKnownHostsFiles(files...); err != nil { return nil, err } return knownhosts.New(files...) } func getDefaultKnownHostsFiles() ([]string, error) { files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS")) if len(files) != 0 { return files, nil } homeDirPath, err := os.UserHomeDir() if err != nil { return nil, err } return []string{ filepath.Join(homeDirPath, "/.ssh/known_hosts"), "/etc/ssh/ssh_known_hosts", }, nil } func filterKnownHostsFiles(files ...string) ([]string, error) { var out []string for _, file := range files { _, err := os.Stat(file) if err == nil { out = append(out, file) continue } if !os.IsNotExist(err) { return nil, err } } if len(out) == 0 { return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable") } return out, nil } // HostKeyCallbackHelper is a helper that provides common functionality to // configure HostKeyCallback into a ssh.ClientConfig. type HostKeyCallbackHelper struct { // HostKeyCallback is the function type used for verifying server keys. // If nil default callback will be create using NewKnownHostsCallback // without argument. HostKeyCallback ssh.HostKeyCallback } // SetHostKeyCallback sets the field HostKeyCallback in the given cfg. 
If // HostKeyCallback is empty a default callback is created using // NewKnownHostsCallback. func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) { var err error if m.HostKeyCallback == nil { if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil { return cfg, err } } cfg.HostKeyCallback = m.HostKeyCallback return cfg, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/ssh/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/ssh/common.go
// Package ssh implements the SSH transport protocol. package ssh import ( "context" "fmt" "net" "reflect" "strconv" "strings" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common" "github.com/skeema/knownhosts" "github.com/kevinburke/ssh_config" "golang.org/x/crypto/ssh" "golang.org/x/net/proxy" ) // DefaultClient is the default SSH client. var DefaultClient = NewClient(nil) // DefaultSSHConfig is the reader used to access parameters stored in the // system's ssh_config files. If nil all the ssh_config are ignored. var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings type sshConfig interface { Get(alias, key string) string } // NewClient creates a new SSH client with an optional *ssh.ClientConfig. func NewClient(config *ssh.ClientConfig) transport.Transport { return common.NewClient(&runner{config: config}) } // DefaultAuthBuilder is the function used to create a default AuthMethod, when // the user doesn't provide any. var DefaultAuthBuilder = func(user string) (AuthMethod, error) { return NewSSHAgentAuth(user) } const DefaultPort = 22 type runner struct { config *ssh.ClientConfig } func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { if err := c.setAuth(auth); err != nil { return nil, err } } if err := c.connect(); err != nil { return nil, err } return c, nil } type command struct { *ssh.Session connected bool command string endpoint *transport.Endpoint client *ssh.Client auth AuthMethod config *ssh.ClientConfig } func (c *command) setAuth(auth transport.AuthMethod) error { a, ok := auth.(AuthMethod) if !ok { return transport.ErrInvalidAuthMethod } c.auth = a return nil } func (c *command) Start() error { return c.Session.Start(endpointToCommand(c.command, c.endpoint)) } // Close closes the SSH session and connection. 
func (c *command) Close() error { if !c.connected { return nil } c.connected = false //XXX: If did read the full packfile, then the session might be already // closed. _ = c.Session.Close() err := c.client.Close() //XXX: in go1.16+ we can use errors.Is(err, net.ErrClosed) if err != nil && strings.HasSuffix(err.Error(), "use of closed network connection") { return nil } return err } // connect connects to the SSH server, unless a AuthMethod was set with // SetAuth method, by default uses an auth method based on PublicKeysCallback, // it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK // environment var. func (c *command) connect() error { if c.connected { return transport.ErrAlreadyConnected } if c.auth == nil { if err := c.setAuthFromEndpoint(); err != nil { return err } } var err error config, err := c.auth.ClientConfig() if err != nil { return err } hostWithPort := c.getHostWithPort() if config.HostKeyCallback == nil { kh, err := newKnownHosts() if err != nil { return err } config.HostKeyCallback = kh.HostKeyCallback() config.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort) } else if len(config.HostKeyAlgorithms) == 0 { // Set the HostKeyAlgorithms based on HostKeyCallback. // For background see https://github.com/go-git/go-git/issues/411 as well as // https://github.com/golang/go/issues/29286 for root cause. 
config.HostKeyAlgorithms = knownhosts.HostKeyAlgorithms(config.HostKeyCallback, hostWithPort) } overrideConfig(c.config, config) c.client, err = dial("tcp", hostWithPort, c.endpoint.Proxy, config) if err != nil { return err } c.Session, err = c.client.NewSession() if err != nil { _ = c.client.Close() return err } c.connected = true return nil } func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.ClientConfig) (*ssh.Client, error) { var ( ctx = context.Background() cancel context.CancelFunc ) if config.Timeout > 0 { ctx, cancel = context.WithTimeout(ctx, config.Timeout) } else { ctx, cancel = context.WithCancel(ctx) } defer cancel() var conn net.Conn var dialErr error if proxyOpts.URL != "" { proxyUrl, err := proxyOpts.FullURL() if err != nil { return nil, err } dialer, err := proxy.FromURL(proxyUrl, proxy.Direct) if err != nil { return nil, err } // Try to use a ContextDialer, but fall back to a Dialer if that goes south. ctxDialer, ok := dialer.(proxy.ContextDialer) if !ok { return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s", reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer)) } conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr) } else { conn, dialErr = proxy.Dial(ctx, network, addr) } if dialErr != nil { return nil, dialErr } c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) if err != nil { return nil, err } return ssh.NewClient(c, chans, reqs), nil } func (c *command) getHostWithPort() string { if addr, found := c.doGetHostWithPortFromSSHConfig(); found { return addr } host := c.endpoint.Host port := c.endpoint.Port if port <= 0 { port = DefaultPort } return net.JoinHostPort(host, strconv.Itoa(port)) } func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { if DefaultSSHConfig == nil { return } host := c.endpoint.Host port := c.endpoint.Port configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname") if configHost != "" { host = configHost found = true } if !found { 
return } configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port") if configPort != "" { if i, err := strconv.Atoi(configPort); err == nil { port = i } } addr = net.JoinHostPort(host, strconv.Itoa(port)) return } func (c *command) setAuthFromEndpoint() error { var err error c.auth, err = DefaultAuthBuilder(c.endpoint.User) return err } func endpointToCommand(cmd string, ep *transport.Endpoint) string { return fmt.Sprintf("%s '%s'", cmd, ep.Path) } func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) { if overrides == nil { return } t := reflect.TypeOf(*c) vc := reflect.ValueOf(c).Elem() vo := reflect.ValueOf(overrides).Elem() for i := 0; i < t.NumField(); i++ { f := t.Field(i) vcf := vc.FieldByName(f.Name) vof := vo.FieldByName(f.Name) vcf.Set(vof) } *c = vc.Interface().(ssh.ClientConfig) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/git/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/git/common.go
// Package git implements the git transport protocol. package git import ( "io" "net" "strconv" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) // DefaultClient is the default git client. var DefaultClient = common.NewClient(&runner{}) const DefaultPort = 9418 type runner struct{} // Command returns a new Command for the given cmd in the given Endpoint func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { // auth not allowed since git protocol doesn't support authentication if auth != nil { return nil, transport.ErrInvalidAuthMethod } c := &command{command: cmd, endpoint: ep} if err := c.connect(); err != nil { return nil, err } return c, nil } type command struct { conn net.Conn connected bool command string endpoint *transport.Endpoint } // Start executes the command sending the required message to the TCP connection func (c *command) Start() error { req := packp.GitProtoRequest{ RequestCommand: c.command, Pathname: c.endpoint.Path, } host := c.endpoint.Host if c.endpoint.Port != DefaultPort { host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port)) } req.Host = host return req.Encode(c.conn) } func (c *command) connect() error { if c.connected { return transport.ErrAlreadyConnected } var err error c.conn, err = net.Dial("tcp", c.getHostWithPort()) if err != nil { return err } c.connected = true return nil } func (c *command) getHostWithPort() string { host := c.endpoint.Host port := c.endpoint.Port if port <= 0 { port = DefaultPort } return net.JoinHostPort(host, strconv.Itoa(port)) } // StderrPipe git protocol doesn't have any dedicated error channel func (c *command) StderrPipe() (io.Reader, error) { return nil, nil } // StdinPipe returns the underlying connection as WriteCloser, wrapped to 
prevent // call to the Close function from the connection, a command execution in git // protocol can't be closed or killed func (c *command) StdinPipe() (io.WriteCloser, error) { return ioutil.WriteNopCloser(c.conn), nil } // StdoutPipe returns the underlying connection as Reader func (c *command) StdoutPipe() (io.Reader, error) { return c.conn, nil } // Close closes the TCP connection and connection. func (c *command) Close() error { if !c.connected { return nil } c.connected = false return c.conn.Close() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/mocks.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/mocks.go
package common import ( "bytes" "io" gogitioutil "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/jesseduffield/go-git/v5/plumbing/transport" ) type MockCommand struct { stdin bytes.Buffer stdout bytes.Buffer stderr bytes.Buffer } func (c MockCommand) StderrPipe() (io.Reader, error) { return &c.stderr, nil } func (c MockCommand) StdinPipe() (io.WriteCloser, error) { return gogitioutil.WriteNopCloser(&c.stdin), nil } func (c MockCommand) StdoutPipe() (io.Reader, error) { return &c.stdout, nil } func (c MockCommand) Start() error { return nil } func (c MockCommand) Close() error { panic("not implemented") } type MockCommander struct { stderr string } func (c MockCommander) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) { return &MockCommand{ stderr: *bytes.NewBufferString(c.stderr), }, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/server.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/server.go
package common import ( "context" "fmt" "io" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) // ServerCommand is used for a single server command execution. type ServerCommand struct { Stderr io.Writer Stdout io.WriteCloser Stdin io.Reader } func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { ioutil.CheckClose(cmd.Stdout, &err) ar, err := s.AdvertisedReferences() if err != nil { return err } if err := ar.Encode(cmd.Stdout); err != nil { return err } req := packp.NewUploadPackRequest() if err := req.Decode(cmd.Stdin); err != nil { return err } var resp *packp.UploadPackResponse resp, err = s.UploadPack(context.TODO(), req) if err != nil { return err } return resp.Encode(cmd.Stdout) } func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { ar, err := s.AdvertisedReferences() if err != nil { return fmt.Errorf("internal error in advertised references: %s", err) } if err := ar.Encode(cmd.Stdout); err != nil { return fmt.Errorf("error in advertised references encoding: %s", err) } req := packp.NewReferenceUpdateRequest() if err := req.Decode(cmd.Stdin); err != nil { return fmt.Errorf("error decoding: %s", err) } rs, err := s.ReceivePack(context.TODO(), req) if rs != nil { if err := rs.Encode(cmd.Stdout); err != nil { return fmt.Errorf("error in encoding report status %s", err) } } if err != nil { return fmt.Errorf("error in receive pack: %s", err) } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common/common.go
// Package common implements the git pack protocol with a pluggable transport. // This is a low-level package to implement new transports. Use a concrete // implementation instead (e.g. http, file, ssh). // // A simple example of usage can be found in the file package. package common import ( "bufio" "context" "errors" "fmt" "io" "regexp" "strings" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/pktline" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) const ( readErrorSecondsTimeout = 10 ) var ( ErrTimeoutExceeded = errors.New("timeout exceeded") // stdErrSkipPattern is used for skipping lines from a command's stderr output. // Any line matching this pattern will be skipped from further // processing and not be returned to calling code. stdErrSkipPattern = regexp.MustCompile("^remote:( =*){0,1}$") ) // Commander creates Command instances. This is the main entry point for // transport implementations. type Commander interface { // Command creates a new Command for the given git command and // endpoint. cmd can be git-upload-pack or git-receive-pack. An // error should be returned if the endpoint is not supported or the // command cannot be created (e.g. binary does not exist, connection // cannot be established). Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) } // Command is used for a single command execution. // This interface is modeled after exec.Cmd and ssh.Session in the standard // library. type Command interface { // StderrPipe returns a pipe that will be connected to the command's // standard error when the command starts. It should not be called after // Start. 
StderrPipe() (io.Reader, error) // StdinPipe returns a pipe that will be connected to the command's // standard input when the command starts. It should not be called after // Start. The pipe should be closed when no more input is expected. StdinPipe() (io.WriteCloser, error) // StdoutPipe returns a pipe that will be connected to the command's // standard output when the command starts. It should not be called after // Start. StdoutPipe() (io.Reader, error) // Start starts the specified command. It does not wait for it to // complete. Start() error // Close closes the command and releases any resources used by it. It // will block until the command exits. Close() error } // CommandKiller expands the Command interface, enabling it for being killed. type CommandKiller interface { // Kill and close the session whatever the state it is. It will block until // the command is terminated. Kill() error } type client struct { cmdr Commander } // NewClient creates a new client using the given Commander. func NewClient(runner Commander) transport.Transport { return &client{runner} } // NewUploadPackSession creates a new UploadPackSession. func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.UploadPackSession, error) { return c.newSession(transport.UploadPackServiceName, ep, auth) } // NewReceivePackSession creates a new ReceivePackSession. 
func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.ReceivePackSession, error) { return c.newSession(transport.ReceivePackServiceName, ep, auth) } type session struct { Stdin io.WriteCloser Stdout io.Reader Command Command isReceivePack bool advRefs *packp.AdvRefs packRun bool finished bool firstErrLine chan string } func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { cmd, err := c.cmdr.Command(s, ep, auth) if err != nil { return nil, err } stdin, err := cmd.StdinPipe() if err != nil { return nil, err } stdout, err := cmd.StdoutPipe() if err != nil { return nil, err } stderr, err := cmd.StderrPipe() if err != nil { return nil, err } if err := cmd.Start(); err != nil { return nil, err } return &session{ Stdin: stdin, Stdout: stdout, Command: cmd, firstErrLine: c.listenFirstError(stderr), isReceivePack: s == transport.ReceivePackServiceName, }, nil } func (c *client) listenFirstError(r io.Reader) chan string { if r == nil { return nil } errLine := make(chan string, 1) go func() { s := bufio.NewScanner(r) for { if s.Scan() { line := s.Text() if !stdErrSkipPattern.MatchString(line) { errLine <- line break } } else { close(errLine) break } } _, _ = io.Copy(io.Discard, r) }() return errLine } func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { return s.AdvertisedReferencesContext(context.TODO()) } // AdvertisedReferences retrieves the advertised references from the server. func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { if s.advRefs != nil { return s.advRefs, nil } ar := packp.NewAdvRefs() if err := ar.Decode(s.StdoutContext(ctx)); err != nil { if err := s.handleAdvRefDecodeError(err); err != nil { return nil, err } } // Some servers like jGit, announce capabilities instead of returning an // packp message with a flush. This verifies that we received a empty // adv-refs, even it contains capabilities. 
if !s.isReceivePack && ar.IsEmpty() { return nil, transport.ErrEmptyRemoteRepository } transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar return ar, nil } func (s *session) handleAdvRefDecodeError(err error) error { var errLine *pktline.ErrorLine if errors.As(err, &errLine) { if isRepoNotFoundError(errLine.Text) { return transport.ErrRepositoryNotFound } return errLine } // If repository is not found, we get empty stdout and server writes an // error to stderr. if errors.Is(err, packp.ErrEmptyInput) { // TODO:(v6): handle this error in a better way. // Instead of checking the stderr output for a specific error message, // define an ExitError and embed the stderr output and exit (if one // exists) in the error struct. Just like exec.ExitError. s.finished = true if err := s.checkNotFoundError(); err != nil { return err } return io.ErrUnexpectedEOF } // For empty (but existing) repositories, we get empty advertised-references // message. But valid. That is, it includes at least a flush. if err == packp.ErrEmptyAdvRefs { // Empty repositories are valid for git-receive-pack. if s.isReceivePack { return nil } if err := s.finish(); err != nil { return err } return transport.ErrEmptyRemoteRepository } // Some server sends the errors as normal content (git protocol), so when // we try to decode it fails, we need to check the content of it, to detect // not found errors if uerr, ok := err.(*packp.ErrUnexpectedData); ok { if isRepoNotFoundError(string(uerr.Data)) { return transport.ErrRepositoryNotFound } } return err } // UploadPack performs a request to the server to fetch a packfile. A reader is // returned with the packfile content. The reader must be closed after reading. func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { if req.IsEmpty() { // XXX: IsEmpty means haves are a subset of wants, in that case we have // everything we asked for. Close the connection and return nil. 
if err := s.finish(); err != nil { return nil, err } // TODO:(v6) return nil here return nil, transport.ErrEmptyUploadPackRequest } if err := req.Validate(); err != nil { return nil, err } if _, err := s.AdvertisedReferencesContext(ctx); err != nil { return nil, err } s.packRun = true in := s.StdinContext(ctx) out := s.StdoutContext(ctx) if err := uploadPack(in, out, req); err != nil { return nil, err } r, err := ioutil.NonEmptyReader(out) if err == ioutil.ErrEmptyReader { if c, ok := s.Stdout.(io.Closer); ok { _ = c.Close() } return nil, transport.ErrEmptyUploadPackRequest } if err != nil { return nil, err } rc := ioutil.NewReadCloser(r, s) return DecodeUploadPackResponse(rc, req) } func (s *session) StdinContext(ctx context.Context) io.WriteCloser { return ioutil.NewWriteCloserOnError( ioutil.NewContextWriteCloser(ctx, s.Stdin), s.onError, ) } func (s *session) StdoutContext(ctx context.Context) io.Reader { return ioutil.NewReaderOnError( ioutil.NewContextReader(ctx, s.Stdout), s.onError, ) } func (s *session) onError(err error) { if k, ok := s.Command.(CommandKiller); ok { _ = k.Kill() } _ = s.Close() } func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { if _, err := s.AdvertisedReferences(); err != nil { return nil, err } s.packRun = true w := s.StdinContext(ctx) if err := req.Encode(w); err != nil { return nil, err } if err := w.Close(); err != nil { return nil, err } if !req.Capabilities.Supports(capability.ReportStatus) { // If we don't have report-status, we can only // check return value error. 
return nil, s.Command.Close() } r := s.StdoutContext(ctx) var d *sideband.Demuxer if req.Capabilities.Supports(capability.Sideband64k) { d = sideband.NewDemuxer(sideband.Sideband64k, r) } else if req.Capabilities.Supports(capability.Sideband) { d = sideband.NewDemuxer(sideband.Sideband, r) } if d != nil { d.Progress = req.Progress r = d } report := packp.NewReportStatus() if err := report.Decode(r); err != nil { return nil, err } if err := report.Error(); err != nil { defer s.Close() return report, err } return report, s.Command.Close() } func (s *session) finish() error { if s.finished { return nil } s.finished = true // If we did not run a upload/receive-pack, we close the connection // gracefully by sending a flush packet to the server. If the server // operates correctly, it will exit with status 0. if !s.packRun { _, err := s.Stdin.Write(pktline.FlushPkt) return err } return nil } func (s *session) Close() (err error) { err = s.finish() defer ioutil.CheckClose(s.Command, &err) return } func (s *session) checkNotFoundError() error { t := time.NewTicker(time.Second * readErrorSecondsTimeout) defer t.Stop() select { case <-t.C: return ErrTimeoutExceeded case line, ok := <-s.firstErrLine: if !ok || len(line) == 0 { return nil } if isRepoNotFoundError(line) { return transport.ErrRepositoryNotFound } // TODO:(v6): return server error just as it is without a prefix return fmt.Errorf("unknown error: %s", line) } } const ( githubRepoNotFoundErr = "Repository not found." bitbucketRepoNotFoundErr = "repository does not exist." localRepoNotFoundErr = "does not appear to be a git repository" gitProtocolNotFoundErr = "Repository not found." 
gitProtocolNoSuchErr = "no such repository" gitProtocolAccessDeniedErr = "access denied" gogsAccessDeniedErr = "Repository does not exist or you do not have access" gitlabRepoNotFoundErr = "The project you were looking for could not be found" ) func isRepoNotFoundError(s string) bool { for _, err := range []string{ githubRepoNotFoundErr, bitbucketRepoNotFoundErr, localRepoNotFoundErr, gitProtocolNotFoundErr, gitProtocolNoSuchErr, gitProtocolAccessDeniedErr, gogsAccessDeniedErr, gitlabRepoNotFoundErr, } { if strings.Contains(s, err) { return true } } return false } // uploadPack implements the git-upload-pack protocol. func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error { // TODO support multi_ack mode // TODO support multi_ack_detailed mode // TODO support acks for common objects // TODO build a proper state machine for all these processing options if err := req.UploadRequest.Encode(w); err != nil { return fmt.Errorf("sending upload-req message: %s", err) } if err := req.UploadHaves.Encode(w, true); err != nil { return fmt.Errorf("sending haves message: %s", err) } if err := sendDone(w); err != nil { return fmt.Errorf("sending done message: %s", err) } if err := w.Close(); err != nil { return fmt.Errorf("closing input: %s", err) } return nil } func sendDone(w io.Writer) error { e := pktline.NewEncoder(w) return e.Encodef("done\n") } // DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( *packp.UploadPackResponse, error, ) { res := packp.NewUploadPackResponse(req) if err := res.Decode(r); err != nil { return nil, fmt.Errorf("error decoding upload-pack response: %s", err) } return res, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/server/loader.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/server/loader.go
package server import ( "github.com/jesseduffield/go-git/v5/plumbing/cache" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/storage/filesystem" "github.com/go-git/go-billy/v5" "github.com/go-git/go-billy/v5/osfs" ) // DefaultLoader is a filesystem loader ignoring host and resolving paths to /. var DefaultLoader = NewFilesystemLoader(osfs.New("")) // Loader loads repository's storer.Storer based on an optional host and a path. type Loader interface { // Load loads a storer.Storer given a transport.Endpoint. // Returns transport.ErrRepositoryNotFound if the repository does not // exist. Load(ep *transport.Endpoint) (storer.Storer, error) } type fsLoader struct { base billy.Filesystem } // NewFilesystemLoader creates a Loader that ignores host and resolves paths // with a given base filesystem. func NewFilesystemLoader(base billy.Filesystem) Loader { return &fsLoader{base} } // Load looks up the endpoint's path in the base file system and returns a // storer for it. Returns transport.ErrRepositoryNotFound if a repository does // not exist in the given path. func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { fs, err := l.base.Chroot(ep.Path) if err != nil { return nil, err } var bare bool if _, err := fs.Stat("config"); err == nil { bare = true } if !bare { // do not use git.GitDirName due to import cycle if _, err := fs.Stat(".git"); err != nil { return nil, transport.ErrRepositoryNotFound } } return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil } // MapLoader is a Loader that uses a lookup map of storer.Storer by // transport.Endpoint. type MapLoader map[string]storer.Storer // Load returns a storer.Storer for given a transport.Endpoint by looking it up // in the map. Returns transport.ErrRepositoryNotFound if the endpoint does not // exist. 
func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { s, ok := l[ep.String()] if !ok { return nil, transport.ErrRepositoryNotFound } return s, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/server/server.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/server/server.go
// Package server implements the git server protocol. For most use cases, the // transport-specific implementations should be used. package server import ( "context" "errors" "fmt" "io" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/format/packfile" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" "github.com/jesseduffield/go-git/v5/plumbing/revlist" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) var DefaultServer = NewServer(DefaultLoader) type server struct { loader Loader handler *handler } // NewServer returns a transport.Transport implementing a git server, // independent of transport. Each transport must wrap this. func NewServer(loader Loader) transport.Transport { return &server{ loader, &handler{asClient: false}, } } // NewClient returns a transport.Transport implementing a client with an // embedded server. 
func NewClient(loader Loader) transport.Transport { return &server{ loader, &handler{asClient: true}, } } func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { sto, err := s.loader.Load(ep) if err != nil { return nil, err } return s.handler.NewUploadPackSession(sto) } func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { sto, err := s.loader.Load(ep) if err != nil { return nil, err } return s.handler.NewReceivePackSession(sto) } type handler struct { asClient bool } func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { return &upSession{ session: session{storer: s, asClient: h.asClient}, }, nil } func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { return &rpSession{ session: session{storer: s, asClient: h.asClient}, cmdStatus: map[plumbing.ReferenceName]error{}, }, nil } type session struct { storer storer.Storer caps *capability.List asClient bool } func (s *session) Close() error { return nil } func (s *session) SetAuth(transport.AuthMethod) error { //TODO: deprecate return nil } func (s *session) checkSupportedCapabilities(cl *capability.List) error { for _, c := range cl.All() { if !s.caps.Supports(c) { return fmt.Errorf("unsupported capability: %s", c) } } return nil } type upSession struct { session } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { return s.AdvertisedReferencesContext(context.TODO()) } func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { return nil, err } s.caps = ar.Capabilities if err := setReferences(s.storer, ar); err != nil { return nil, err } if err := setHEAD(s.storer, ar); err != nil { return nil, err } if s.asClient && len(ar.References) == 0 { 
return nil, transport.ErrEmptyRemoteRepository } return ar, nil } func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } if err := req.Validate(); err != nil { return nil, err } if s.caps == nil { s.caps = capability.NewList() if err := s.setSupportedCapabilities(s.caps); err != nil { return nil, err } } if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { return nil, err } s.caps = req.Capabilities if len(req.Shallows) > 0 { return nil, fmt.Errorf("shallow not supported") } objs, err := s.objectsToUpload(req) if err != nil { return nil, err } pr, pw := io.Pipe() e := packfile.NewEncoder(pw, s.storer, false) go func() { // TODO: plumb through a pack window. _, err := e.Encode(objs, 10) pw.CloseWithError(err) }() return packp.NewUploadPackResponseWithPackfile(req, ioutil.NewContextReadCloser(ctx, pr), ), nil } func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { haves, err := revlist.Objects(s.storer, req.Haves, nil) if err != nil { return nil, err } return revlist.Objects(s.storer, req.Wants, haves) } func (*upSession) setSupportedCapabilities(c *capability.List) error { if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil { return err } if err := c.Set(capability.OFSDelta); err != nil { return err } return nil } type rpSession struct { session cmdStatus map[plumbing.ReferenceName]error firstErr error unpackErr error } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { return s.AdvertisedReferencesContext(context.TODO()) } func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { ar := packp.NewAdvRefs() if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { return nil, err } s.caps = ar.Capabilities if err := setReferences(s.storer, ar); err != nil { return nil, err } if err := 
setHEAD(s.storer, ar); err != nil { return nil, err } return ar, nil } var ( ErrUpdateReference = errors.New("failed to update ref") ) func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { if s.caps == nil { s.caps = capability.NewList() if err := s.setSupportedCapabilities(s.caps); err != nil { return nil, err } } if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { return nil, err } s.caps = req.Capabilities //TODO: Implement 'atomic' update of references. if req.Packfile != nil { r := ioutil.NewContextReadCloser(ctx, req.Packfile) if err := s.writePackfile(r); err != nil { s.unpackErr = err s.firstErr = err return s.reportStatus(), err } } s.updateReferences(req) return s.reportStatus(), s.firstErr } func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) { for _, cmd := range req.Commands { exists, err := referenceExists(s.storer, cmd.Name) if err != nil { s.setStatus(cmd.Name, err) continue } switch cmd.Action() { case packp.Create: if exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } ref := plumbing.NewHashReference(cmd.Name, cmd.New) err := s.storer.SetReference(ref) s.setStatus(cmd.Name, err) case packp.Delete: if !exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } err := s.storer.RemoveReference(cmd.Name) s.setStatus(cmd.Name, err) case packp.Update: if !exists { s.setStatus(cmd.Name, ErrUpdateReference) continue } ref := plumbing.NewHashReference(cmd.Name, cmd.New) err := s.storer.SetReference(ref) s.setStatus(cmd.Name, err) } } } func (s *rpSession) writePackfile(r io.ReadCloser) error { if r == nil { return nil } if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { _ = r.Close() return err } return r.Close() } func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { s.cmdStatus[ref] = err if s.firstErr == nil && err != nil { s.firstErr = err } } func (s *rpSession) reportStatus() *packp.ReportStatus { if 
!s.caps.Supports(capability.ReportStatus) { return nil } rs := packp.NewReportStatus() rs.UnpackStatus = "ok" if s.unpackErr != nil { rs.UnpackStatus = s.unpackErr.Error() } if s.cmdStatus == nil { return rs } for ref, err := range s.cmdStatus { msg := "ok" if err != nil { msg = err.Error() } status := &packp.CommandStatus{ ReferenceName: ref, Status: msg, } rs.CommandStatuses = append(rs.CommandStatuses, status) } return rs } func (*rpSession) setSupportedCapabilities(c *capability.List) error { if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil { return err } if err := c.Set(capability.OFSDelta); err != nil { return err } if err := c.Set(capability.DeleteRefs); err != nil { return err } return c.Set(capability.ReportStatus) } func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { ref, err := s.Reference(plumbing.HEAD) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } if ref.Type() == plumbing.SymbolicReference { if err := ar.AddReference(ref); err != nil { return nil } ref, err = storer.ResolveReference(s, ref.Target()) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } } if ref.Type() != plumbing.HashReference { return plumbing.ErrInvalidType } h := ref.Hash() ar.Head = &h return nil } func setReferences(s storer.Storer, ar *packp.AdvRefs) error { //TODO: add peeled references. iter, err := s.IterReferences() if err != nil { return err } return iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } ar.References[ref.Name().String()] = ref.Hash() return nil }) } func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { _, err := s.Reference(n) if err == plumbing.ErrReferenceNotFound { return false, nil } return err == nil, err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/client/client.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/client/client.go
// Package client contains helper function to deal with the different client // protocols. package client import ( "fmt" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/file" "github.com/jesseduffield/go-git/v5/plumbing/transport/git" "github.com/jesseduffield/go-git/v5/plumbing/transport/http" "github.com/jesseduffield/go-git/v5/plumbing/transport/ssh" ) // Protocols are the protocols supported by default. var Protocols = map[string]transport.Transport{ "http": http.DefaultClient, "https": http.DefaultClient, "ssh": ssh.DefaultClient, "git": git.DefaultClient, "file": file.DefaultClient, } // InstallProtocol adds or modifies an existing protocol. func InstallProtocol(scheme string, c transport.Transport) { if c == nil { delete(Protocols, scheme) return } Protocols[scheme] = c } // NewClient returns the appropriate client among of the set of known protocols: // http://, https://, ssh:// and file://. // See `InstallProtocol` to add or modify protocols. func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { return getTransport(endpoint) } func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) { f, ok := Protocols[endpoint.Protocol] if !ok { return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) } if f == nil { return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) } return f, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/receive_pack.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/receive_pack.go
package http import ( "bytes" "context" "fmt" "io" "net/http" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) type rpSession struct { *session } func newReceivePackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { s, err := newSession(c, ep, auth) return &rpSession{s}, err } func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName) } func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName) } func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( *packp.ReportStatus, error) { url := fmt.Sprintf( "%s/%s", s.endpoint.String(), transport.ReceivePackServiceName, ) buf := bytes.NewBuffer(nil) if err := req.Encode(buf); err != nil { return nil, err } res, err := s.doRequest(ctx, http.MethodPost, url, buf) if err != nil { return nil, err } r, err := ioutil.NonEmptyReader(res.Body) if err == ioutil.ErrEmptyReader { return nil, nil } if err != nil { return nil, err } var d *sideband.Demuxer if req.Capabilities.Supports(capability.Sideband64k) { d = sideband.NewDemuxer(sideband.Sideband64k, r) } else if req.Capabilities.Supports(capability.Sideband) { d = sideband.NewDemuxer(sideband.Sideband, r) } if d != nil { d.Progress = req.Progress r = d } rc := ioutil.NewReadCloser(r, res.Body) report := packp.NewReportStatus() if err := report.Decode(rc); err != nil { return nil, err } return report, report.Error() } func (s *rpSession) doRequest( ctx context.Context, method, 
url string, content *bytes.Buffer, ) (*http.Response, error) { var body io.Reader if content != nil { body = content } req, err := http.NewRequest(method, url, body) if err != nil { return nil, plumbing.NewPermanentError(err) } applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName) s.ApplyAuthToRequest(req) res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, plumbing.NewUnexpectedError(err) } if err := NewErr(res); err != nil { return nil, err } return res, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/upload_pack.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/upload_pack.go
package http import ( "bytes" "context" "fmt" "io" "net/http" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/format/pktline" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/internal/common" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) type upSession struct { *session } func newUploadPackSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { s, err := newSession(c, ep, auth) return &upSession{s}, err } func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName) } func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { return advertisedReferences(ctx, s.session, transport.UploadPackServiceName) } func (s *upSession) UploadPack( ctx context.Context, req *packp.UploadPackRequest, ) (*packp.UploadPackResponse, error) { if req.IsEmpty() { return nil, transport.ErrEmptyUploadPackRequest } if err := req.Validate(); err != nil { return nil, err } url := fmt.Sprintf( "%s/%s", s.endpoint.String(), transport.UploadPackServiceName, ) content, err := uploadPackRequestToReader(req) if err != nil { return nil, err } res, err := s.doRequest(ctx, http.MethodPost, url, content) if err != nil { return nil, err } r, err := ioutil.NonEmptyReader(res.Body) if err != nil { if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { return nil, transport.ErrEmptyUploadPackRequest } return nil, err } rc := ioutil.NewReadCloser(r, res.Body) return common.DecodeUploadPackResponse(rc, req) } // Close does nothing. 
func (s *upSession) Close() error { return nil } func (s *upSession) doRequest( ctx context.Context, method, url string, content *bytes.Buffer, ) (*http.Response, error) { var body io.Reader if content != nil { body = content } req, err := http.NewRequest(method, url, body) if err != nil { return nil, plumbing.NewPermanentError(err) } applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName) s.ApplyAuthToRequest(req) res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, plumbing.NewUnexpectedError(err) } if err := NewErr(res); err != nil { return nil, err } return res, nil } func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { buf := bytes.NewBuffer(nil) e := pktline.NewEncoder(buf) if err := req.UploadRequest.Encode(buf); err != nil { return nil, fmt.Errorf("sending upload-req message: %s", err) } if err := req.UploadHaves.Encode(buf, false); err != nil { return nil, fmt.Errorf("sending haves message: %s", err) } if err := e.EncodeString("done\n"); err != nil { return nil, err } return buf, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/transport.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/transport.go
package http import ( "net/http" "net/url" ) // transportOptions contains transport specific configuration. type transportOptions struct { insecureSkipTLS bool // []byte is not comparable. caBundle string proxyURL url.URL } func (c *client) addTransport(opts transportOptions, transport *http.Transport) { c.mutex.Lock() c.transports.Add(opts, transport) c.mutex.Unlock() } func (c *client) removeTransport(opts transportOptions) { c.mutex.Lock() c.transports.Remove(opts) c.mutex.Unlock() } func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) { c.mutex.RLock() t, ok := c.transports.Get(opts) c.mutex.RUnlock() if !ok { return nil, false } transport, ok := t.(*http.Transport) if !ok { return nil, false } return transport, true }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/transport/http/common.go
// Package http implements the HTTP transport protocol. package http import ( "bytes" "context" "crypto/tls" "crypto/x509" "fmt" "net" "net/http" "net/url" "reflect" "strconv" "strings" "sync" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/golang/groupcache/lru" ) // it requires a bytes.Buffer, because we need to know the length func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { req.Header.Add("User-Agent", capability.DefaultAgent()) req.Header.Add("Host", host) // host:port if content == nil { req.Header.Add("Accept", "*/*") return } req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType)) req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType)) req.Header.Add("Content-Length", strconv.Itoa(content.Len())) } const infoRefsPath = "/info/refs" func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) { url := fmt.Sprintf( "%s%s?service=%s", s.endpoint.String(), infoRefsPath, serviceName, ) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return nil, err } s.ApplyAuthToRequest(req) applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) res, err := s.client.Do(req.WithContext(ctx)) if err != nil { return nil, err } s.ModifyEndpointIfRedirect(res) defer ioutil.CheckClose(res.Body, &err) if err = NewErr(res); err != nil { return nil, err } ar := packp.NewAdvRefs() if err = ar.Decode(res.Body); err != nil { if err == packp.ErrEmptyAdvRefs { err = transport.ErrEmptyRemoteRepository } return nil, err } // Git 2.41+ returns a zero-id plus capabilities when an empty // repository is being cloned. 
This skips the existing logic within // advrefs_decode.decodeFirstHash, which expects a flush-pkt instead. // // This logic aligns with plumbing/transport/internal/common/common.go. if ar.IsEmpty() && // Empty repositories are valid for git-receive-pack. transport.ReceivePackServiceName != serviceName { return nil, transport.ErrEmptyRemoteRepository } transport.FilterUnsupportedCapabilities(ar.Capabilities) s.advRefs = ar return ar, nil } type client struct { client *http.Client transports *lru.Cache mutex sync.RWMutex } // ClientOptions holds user configurable options for the client. type ClientOptions struct { // CacheMaxEntries is the max no. of entries that the transport objects // cache will hold at any given point of time. It must be a positive integer. // Calling `client.addTransport()` after the cache has reached the specified // size, will result in the least recently used transport getting deleted // before the provided transport is added to the cache. CacheMaxEntries int } var ( // defaultTransportCacheSize is the default capacity of the transport objects cache. // Its value is 0 because transport caching is turned off by default and is an // opt-in feature. defaultTransportCacheSize = 0 // DefaultClient is the default HTTP client, which uses a net/http client configured // with http.DefaultTransport. DefaultClient = NewClient(nil) ) // NewClient creates a new client with a custom net/http client. // See `InstallProtocol` to install and override default http client. // If the net/http client is nil or empty, it will use a net/http client configured // with http.DefaultTransport. // // Note that for HTTP client cannot distinguish between private repositories and // unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` // for both. 
func NewClient(c *http.Client) transport.Transport { if c == nil { c = &http.Client{ Transport: http.DefaultTransport, } } return NewClientWithOptions(c, &ClientOptions{ CacheMaxEntries: defaultTransportCacheSize, }) } // NewClientWithOptions returns a new client configured with the provided net/http client // and other custom options specific to the client. // If the net/http client is nil or empty, it will use a net/http client configured // with http.DefaultTransport. func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transport { if c == nil { c = &http.Client{ Transport: http.DefaultTransport, } } cl := &client{ client: c, } if opts != nil { if opts.CacheMaxEntries > 0 { cl.transports = lru.New(opts.CacheMaxEntries) } } return cl } func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.UploadPackSession, error) { return newUploadPackSession(c, ep, auth) } func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( transport.ReceivePackSession, error) { return newReceivePackSession(c, ep, auth) } type session struct { auth AuthMethod client *http.Client endpoint *transport.Endpoint advRefs *packp.AdvRefs } func transportWithInsecureTLS(transport *http.Transport) { if transport.TLSClientConfig == nil { transport.TLSClientConfig = &tls.Config{} } transport.TLSClientConfig.InsecureSkipVerify = true } func transportWithCABundle(transport *http.Transport, caBundle []byte) error { rootCAs, err := x509.SystemCertPool() if err != nil { return err } if rootCAs == nil { rootCAs = x509.NewCertPool() } rootCAs.AppendCertsFromPEM(caBundle) if transport.TLSClientConfig == nil { transport.TLSClientConfig = &tls.Config{} } transport.TLSClientConfig.RootCAs = rootCAs return nil } func transportWithProxy(transport *http.Transport, proxyURL *url.URL) { transport.Proxy = http.ProxyURL(proxyURL) } func configureTransport(transport *http.Transport, ep *transport.Endpoint) error { if 
len(ep.CaBundle) > 0 { if err := transportWithCABundle(transport, ep.CaBundle); err != nil { return err } } if ep.InsecureSkipTLS { transportWithInsecureTLS(transport) } if ep.Proxy.URL != "" { proxyURL, err := ep.Proxy.FullURL() if err != nil { return err } transportWithProxy(transport, proxyURL) } return nil } func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { var httpClient *http.Client // We need to configure the http transport if there are transport specific // options present in the endpoint. if len(ep.CaBundle) > 0 || ep.InsecureSkipTLS || ep.Proxy.URL != "" { var transport *http.Transport // if the client wasn't configured to have a cache for transports then just configure // the transport and use it directly, otherwise try to use the cache. if c.transports == nil { tr, ok := c.client.Transport.(*http.Transport) if !ok { return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s", reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport)) } transport = tr.Clone() configureTransport(transport, ep) } else { transportOpts := transportOptions{ caBundle: string(ep.CaBundle), insecureSkipTLS: ep.InsecureSkipTLS, } if ep.Proxy.URL != "" { proxyURL, err := ep.Proxy.FullURL() if err != nil { return nil, err } transportOpts.proxyURL = *proxyURL } var found bool transport, found = c.fetchTransport(transportOpts) if !found { transport = c.client.Transport.(*http.Transport).Clone() configureTransport(transport, ep) c.addTransport(transportOpts, transport) } } httpClient = &http.Client{ Transport: transport, CheckRedirect: c.client.CheckRedirect, Jar: c.client.Jar, Timeout: c.client.Timeout, } } else { httpClient = c.client } s := &session{ auth: basicAuthFromEndpoint(ep), client: httpClient, endpoint: ep, } if auth != nil { a, ok := auth.(AuthMethod) if !ok { return nil, transport.ErrInvalidAuthMethod } s.auth = a } return s, nil } func (s *session) ApplyAuthToRequest(req *http.Request) { if 
s.auth == nil { return } s.auth.SetAuth(req) } func (s *session) ModifyEndpointIfRedirect(res *http.Response) { if res.Request == nil { return } r := res.Request if !strings.HasSuffix(r.URL.Path, infoRefsPath) { return } h, p, err := net.SplitHostPort(r.URL.Host) if err != nil { h = r.URL.Host } if p != "" { port, err := strconv.Atoi(p) if err == nil { s.endpoint.Port = port } } s.endpoint.Host = h s.endpoint.Protocol = r.URL.Scheme s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)] } func (*session) Close() error { return nil } // AuthMethod is concrete implementation of common.AuthMethod for HTTP services type AuthMethod interface { transport.AuthMethod SetAuth(r *http.Request) } func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth { u := ep.User if u == "" { return nil } return &BasicAuth{u, ep.Password} } // BasicAuth represent a HTTP basic auth type BasicAuth struct { Username, Password string } func (a *BasicAuth) SetAuth(r *http.Request) { if a == nil { return } r.SetBasicAuth(a.Username, a.Password) } // Name is name of the auth func (a *BasicAuth) Name() string { return "http-basic-auth" } func (a *BasicAuth) String() string { masked := "*******" if a.Password == "" { masked = "<empty>" } return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked) } // TokenAuth implements an http.AuthMethod that can be used with http transport // to authenticate with HTTP token authentication (also known as bearer // authentication). // // IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g. // GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers // use basic HTTP authentication, with the OAuth token as user or password. // Check the documentation of your git server for details. 
type TokenAuth struct { Token string } func (a *TokenAuth) SetAuth(r *http.Request) { if a == nil { return } r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token)) } // Name is name of the auth func (a *TokenAuth) Name() string { return "http-token-auth" } func (a *TokenAuth) String() string { masked := "*******" if a.Token == "" { masked = "<empty>" } return fmt.Sprintf("%s - %s", a.Name(), masked) } // Err is a dedicated error to return errors based on status code type Err struct { Response *http.Response Reason string } // NewErr returns a new Err based on a http response and closes response body // if needed func NewErr(r *http.Response) error { if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { return nil } var reason string // If a response message is present, add it to error var messageBuffer bytes.Buffer if r.Body != nil { messageLength, _ := messageBuffer.ReadFrom(r.Body) if messageLength > 0 { reason = messageBuffer.String() } _ = r.Body.Close() } switch r.StatusCode { case http.StatusUnauthorized: return fmt.Errorf("%w: %s", transport.ErrAuthenticationRequired, reason) case http.StatusForbidden: return fmt.Errorf("%w: %s", transport.ErrAuthorizationFailed, reason) case http.StatusNotFound: return fmt.Errorf("%w: %s", transport.ErrRepositoryNotFound, reason) } return plumbing.NewUnexpectedError(&Err{r, reason}) } // StatusCode returns the status code of the response func (e *Err) StatusCode() int { return e.Response.StatusCode } func (e *Err) Error() string { return fmt.Sprintf("unexpected requesting %q status code: %d", e.Response.Request.URL, e.Response.StatusCode, ) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/color/color.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/color/color.go
package color // TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct // TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c // Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53. const ( Normal = "" Reset = "\033[m" Bold = "\033[1m" Red = "\033[31m" Green = "\033[32m" Yellow = "\033[33m" Blue = "\033[34m" Magenta = "\033[35m" Cyan = "\033[36m" BoldRed = "\033[1;31m" BoldGreen = "\033[1;32m" BoldYellow = "\033[1;33m" BoldBlue = "\033[1;34m" BoldMagenta = "\033[1;35m" BoldCyan = "\033[1;36m" FaintRed = "\033[2;31m" FaintGreen = "\033[2;32m" FaintYellow = "\033[2;33m" FaintBlue = "\033[2;34m" FaintMagenta = "\033[2;35m" FaintCyan = "\033[2;36m" BgRed = "\033[41m" BgGreen = "\033[42m" BgYellow = "\033[43m" BgBlue = "\033[44m" BgMagenta = "\033[45m" BgCyan = "\033[46m" Faint = "\033[2m" FaintItalic = "\033[2;3m" Reverse = "\033[7m" )
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs.go
package packp

import (
	"fmt"
	"sort"
	"strings"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage/memory"
)

// AdvRefs values represent the information transmitted on an
// advertised-refs message.  Values from this type are not zero-value
// safe, use the New function instead.
type AdvRefs struct {
	// Prefix stores prefix payloads.
	//
	// When using this message over (smart) HTTP, you have to add a pktline
	// before the whole thing with the following payload:
	//
	// '# service=$servicename" LF
	//
	// Moreover, some (all) git HTTP smart servers will send a flush-pkt
	// just after the first pkt-line.
	//
	// To accommodate both situations, the Prefix field allow you to store
	// any data you want to send before the actual pktlines.  It will also
	// be filled up with whatever is found on the line.
	Prefix [][]byte
	// Head stores the resolved HEAD reference if present.
	// This can be present with git-upload-pack, not with git-receive-pack.
	Head *plumbing.Hash
	// Capabilities are the capabilities.
	Capabilities *capability.List
	// References are the hash references.
	References map[string]plumbing.Hash
	// Peeled are the peeled hash references.
	Peeled map[string]plumbing.Hash
	// Shallows are the shallow object ids.
	Shallows []plumbing.Hash
}

// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
func NewAdvRefs() *AdvRefs {
	return &AdvRefs{
		Prefix:       [][]byte{},
		Capabilities: capability.NewList(),
		References:   make(map[string]plumbing.Hash),
		Peeled:       make(map[string]plumbing.Hash),
		Shallows:     []plumbing.Hash{},
	}
}

// AddReference adds r to the advertised refs: symbolic references become a
// symref capability entry, hash references go into the References map.
func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
	switch r.Type() {
	case plumbing.SymbolicReference:
		v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
		return a.Capabilities.Add(capability.SymRef, v)
	case plumbing.HashReference:
		a.References[r.Name().String()] = r.Hash()
	default:
		return plumbing.ErrInvalidType
	}

	return nil
}

// AllReferences materializes every advertised reference (including a resolved
// HEAD, when possible) into an in-memory reference storage.
func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
	s := memory.ReferenceStorage{}
	if err := a.addRefs(s); err != nil {
		return s, plumbing.NewUnexpectedError(err)
	}

	return s, nil
}

// addRefs copies the hash references into s and then resolves HEAD, either
// through symref capabilities (modern servers) or by guessing (see
// resolveHead).
func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
	for name, hash := range a.References {
		ref := plumbing.NewReferenceFromStrings(name, hash.String())
		if err := s.SetReference(ref); err != nil {
			return err
		}
	}

	if a.supportSymrefs() {
		return a.addSymbolicRefs(s)
	}

	return a.resolveHead(s)
}

// If the server does not support symrefs capability,
// we need to guess the reference where HEAD is pointing to.
//
// Git versions prior to 1.8.4.3 has an special procedure to get
// the reference where is pointing to HEAD:
//   - Check if a reference called master exists. If exists and it
//     has the same hash as HEAD hash, we can say that HEAD is pointing to master
//   - If master does not exists or does not have the same hash as HEAD,
//     order references and check in that order if that reference has the same
//     hash than HEAD. If yes, set HEAD pointing to that branch hash
//   - If no reference is found, throw an error
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
	if a.Head == nil {
		return nil
	}

	ref, err := s.Reference(plumbing.Master)

	// check first if HEAD is pointing to master
	if err == nil {
		ok, err := a.createHeadIfCorrectReference(ref, s)
		if err != nil {
			return err
		}

		if ok {
			return nil
		}
	}

	if err != nil && err != plumbing.ErrReferenceNotFound {
		return err
	}

	// From here we are trying to guess the branch that HEAD is pointing
	refIter, err := s.IterReferences()
	if err != nil {
		return err
	}

	var refNames []string
	err = refIter.ForEach(func(r *plumbing.Reference) error {
		refNames = append(refNames, string(r.Name()))
		return nil
	})
	if err != nil {
		return err
	}

	sort.Strings(refNames)

	var headSet bool
	for _, refName := range refNames {
		ref, err := s.Reference(plumbing.ReferenceName(refName))
		if err != nil {
			return err
		}
		ok, err := a.createHeadIfCorrectReference(ref, s)
		if err != nil {
			return err
		}
		if ok {
			headSet = true
			break
		}
	}

	if !headSet {
		return plumbing.ErrReferenceNotFound
	}

	return nil
}

// createHeadIfCorrectReference stores HEAD as a symbolic reference to
// reference when their hashes match; it reports whether HEAD was set.
func (a *AdvRefs) createHeadIfCorrectReference(
	reference *plumbing.Reference,
	s storer.ReferenceStorer) (bool, error) {
	if reference.Hash() == *a.Head {
		headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
		if err := s.SetReference(headRef); err != nil {
			return false, err
		}

		return true, nil
	}

	return false, nil
}

// addSymbolicRefs stores one symbolic reference per symref capability entry
// (each of the form "name:target").
func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
	for _, symref := range a.Capabilities.Get(capability.SymRef) {
		chunks := strings.Split(symref, ":")
		if len(chunks) != 2 {
			err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
			return plumbing.NewUnexpectedError(err)
		}
		name := plumbing.ReferenceName(chunks[0])
		target := plumbing.ReferenceName(chunks[1])
		ref := plumbing.NewSymbolicReference(name, target)
		if err := s.SetReference(ref); err != nil {
			// Propagate the storage failure; the original code returned nil
			// here, silently dropping the error.
			return err
		}
	}

	return nil
}

func (a *AdvRefs) supportSymrefs() bool {
	return a.Capabilities.Supports(capability.SymRef)
}

// IsEmpty returns true if doesn't contain any reference.
func (a *AdvRefs) IsEmpty() bool {
	return a.Head == nil &&
		len(a.References) == 0 &&
		len(a.Peeled) == 0 &&
		len(a.Shallows) == 0
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/srvresp.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/srvresp.go
package packp

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

// ackLineLen is the minimum length of a well-formed ACK pkt-line payload:
// "ACK " + 40-char hex hash.
const ackLineLen = 44

// ServerResponse object acknowledgement from upload-pack service
type ServerResponse struct {
	// ACKs collects the hashes acknowledged by the server.
	ACKs []plumbing.Hash
}

// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
	s := pktline.NewScanner(reader)

	for s.Scan() {
		line := s.Bytes()

		if err := r.decodeLine(line); err != nil {
			return err
		}

		// we need to detect when the end of a response header and the beginning
		// of a packfile header happened, some requests to the git daemon
		// produces a duplicate ACK header even when multi_ack is not supported.
		stop, err := r.stopReading(reader)
		if err != nil {
			return err
		}

		if stop {
			break
		}
	}

	// isMultiACK is true when the remote server advertises the related
	// capabilities when they are not in transport.UnsupportedCapabilities.
	//
	// Users may decide to remove multi_ack and multi_ack_detailed from the
	// unsupported capabilities list, which allows them to do initial clones
	// from Azure DevOps.
	//
	// Follow-up fetches may error, therefore errors are wrapped with additional
	// information highlighting that this capabilities are not supported by go-git.
	//
	// TODO: Implement support for multi_ack or multi_ack_detailed responses.
	err := s.Err()
	if err != nil && isMultiACK {
		return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
	}

	return err
}

// stopReading detects when a valid command such as ACK or NAK is found to be
// read in the buffer without moving the read pointer.
func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
	// Peek 7 bytes: a 4-byte pkt-line length prefix plus a 3-byte command.
	ahead, err := reader.Peek(7)
	if err == io.EOF {
		return true, nil
	}

	if err != nil {
		return false, err
	}

	// Command directly at the start of the buffer (no length prefix).
	if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
		return false, nil
	}

	// Command after a 4-byte pkt-line length prefix.
	if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
		return false, nil
	}

	return true, nil
}

// isValidCommand reports whether b is one of the known 3-byte commands
// ("ACK" or "NAK").
func (r *ServerResponse) isValidCommand(b []byte) bool {
	commands := [][]byte{ack, nak}
	for _, c := range commands {
		if bytes.Equal(b, c) {
			return true
		}
	}

	return false
}

// decodeLine dispatches a single pkt-line payload: ACK lines are parsed for
// their hash, NAK lines are accepted silently, anything else is an error.
func (r *ServerResponse) decodeLine(line []byte) error {
	if len(line) == 0 {
		return fmt.Errorf("unexpected flush")
	}

	if len(line) >= 3 {
		if bytes.Equal(line[0:3], ack) {
			return r.decodeACKLine(line)
		}

		if bytes.Equal(line[0:3], nak) {
			return nil
		}
	}

	return fmt.Errorf("unexpected content %q", string(line))
}

// decodeACKLine extracts the 40-char hex hash following the first space of an
// ACK line and appends it to r.ACKs.
func (r *ServerResponse) decodeACKLine(line []byte) error {
	if len(line) < ackLineLen {
		return fmt.Errorf("malformed ACK %q", line)
	}

	sp := bytes.Index(line, []byte(" "))
	if sp+41 > len(line) {
		return fmt.Errorf("malformed ACK %q", line)
	}
	h := plumbing.NewHash(string(line[sp+1 : sp+41]))
	r.ACKs = append(r.ACKs, h)
	return nil
}

// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
	if len(r.ACKs) > 1 && !isMultiACK {
		// For further information, refer to comments in the Decode func above.
		return errors.New("multi_ack and multi_ack_detailed are not supported")
	}

	e := pktline.NewEncoder(w)
	if len(r.ACKs) == 0 {
		return e.Encodef("%s\n", nak)
	}

	return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/filter.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/filter.go
package packp

import (
	"errors"
	"fmt"
	"net/url"
	"strings"

	"github.com/jesseduffield/go-git/v5/plumbing"
)

// ErrUnsupportedObjectFilterType is returned by FilterObjectType when the
// requested object type cannot be used in an object:type filter.
var ErrUnsupportedObjectFilterType = errors.New("unsupported object filter type")

// Filter values enable the partial clone capability which causes
// the server to omit objects that match the filter.
//
// See [Git's documentation] for more details.
//
// [Git's documentation]: https://github.com/git/git/blob/e02ecfcc534e2021aae29077a958dd11c3897e4c/Documentation/rev-list-options.txt#L948
type Filter string

// BlobLimitPrefix is the size-unit suffix used by FilterBlobLimit.
type BlobLimitPrefix string

const (
	BlobLimitPrefixNone BlobLimitPrefix = ""
	BlobLimitPrefixKibi BlobLimitPrefix = "k"
	BlobLimitPrefixMebi BlobLimitPrefix = "m"
	BlobLimitPrefixGibi BlobLimitPrefix = "g"
)

// FilterBlobNone omits all blobs.
func FilterBlobNone() Filter {
	return "blob:none"
}

// FilterBlobLimit omits blobs of size at least n bytes (when prefix is
// BlobLimitPrefixNone), n kibibytes (when prefix is BlobLimitPrefixKibi),
// n mebibytes (when prefix is BlobLimitPrefixMebi) or n gibibytes (when
// prefix is BlobLimitPrefixGibi). n can be zero, in which case all blobs
// will be omitted.
func FilterBlobLimit(n uint64, prefix BlobLimitPrefix) Filter {
	return Filter(fmt.Sprintf("blob:limit=%d%s", n, prefix))
}

// FilterTreeDepth omits all blobs and trees whose depth from the root tree
// is larger or equal to depth.
func FilterTreeDepth(depth uint64) Filter {
	return Filter(fmt.Sprintf("tree:%d", depth))
}

// FilterObjectType omits all objects which are not of the requested type t.
// Supported types are TagObject, CommitObject, TreeObject and BlobObject.
func FilterObjectType(t plumbing.ObjectType) (Filter, error) {
	// A multi-value case is the idiomatic form of the original
	// fallthrough chain.
	switch t {
	case plumbing.TagObject, plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject:
		return Filter(fmt.Sprintf("object:type=%s", t.String())), nil
	default:
		return "", fmt.Errorf("%w: %s", ErrUnsupportedObjectFilterType, t.String())
	}
}

// FilterCombine combines multiple Filter values together. Each filter is
// URL-escaped, as required by the combine: syntax.
func FilterCombine(filters ...Filter) Filter {
	var escapedFilters []string
	for _, filter := range filters {
		escapedFilters = append(escapedFilters, url.QueryEscape(string(filter)))
	}
	return Filter(fmt.Sprintf("combine:%s", strings.Join(escapedFilters, "+")))
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq_decode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq_decode.go
package packp

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

var (
	// shallowLineLength is "shallow " plus a full hex hash.
	shallowLineLength = len(shallow) + hashSize
	// minCommandLength is two hashes, two separating spaces and at least a
	// one-character reference name.
	minCommandLength = hashSize*2 + 2 + 1
	// minCommandAndCapsLength additionally accounts for the NUL delimiter
	// before the capabilities list.
	minCommandAndCapsLength = minCommandLength + 1
)

var (
	ErrEmpty                        = errors.New("empty update-request message")
	errNoCommands                   = errors.New("unexpected EOF before any command")
	errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found")
)

// errMalformedRequest wraps reason into the canonical malformed-request error.
func errMalformedRequest(reason string) error {
	return fmt.Errorf("malformed request: %s", reason)
}

func errInvalidHashSize(got int) error {
	return fmt.Errorf("invalid hash size: expected %d, got %d",
		hashSize, got)
}

func errInvalidHash(err error) error {
	return fmt.Errorf("invalid hash: %s", err.Error())
}

func errInvalidShallowLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid shallow line length: expected %d, got %d",
		shallowLineLength, got))
}

func errInvalidCommandCapabilitiesLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid command and capabilities line length: expected at least %d, got %d",
		minCommandAndCapsLength, got))
}

func errInvalidCommandLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid command line length: expected at least %d, got %d",
		minCommandLength, got))
}

func errInvalidShallowObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid shallow object id: %s", err.Error()))
}

func errInvalidOldObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid old object id: %s", err.Error()))
}

func errInvalidNewObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid new object id: %s", err.Error()))
}

func errMalformedCommand(err error) error {
	return errMalformedRequest(fmt.Sprintf(
		"malformed command: %s", err.Error()))
}

// Decode reads the next update-request message from the reader and fills req
// with the decoded shallow line, commands, capabilities and packfile stream.
func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
	var rc io.ReadCloser
	var ok bool
	rc, ok = r.(io.ReadCloser)
	if !ok {
		// Wrap plain readers so the packfile can uniformly be an
		// io.ReadCloser.
		rc = io.NopCloser(r)
	}

	d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}
	return d.Decode(req)
}

// updReqDecoder holds the scanning state shared by the decode steps below.
type updReqDecoder struct {
	r   io.ReadCloser
	s   *pktline.Scanner
	req *ReferenceUpdateRequest
}

// Decode runs the fixed sequence of decode steps; each step consumes what it
// needs from the scanner and leaves the next line ready for the following
// step. Order matters and must not be changed.
func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error {
	d.req = req
	funcs := []func() error{
		d.scanLine,
		d.decodeShallow,
		d.decodeCommandAndCapabilities,
		d.decodeCommands,
		d.setPackfile,
		req.validate,
	}

	for _, f := range funcs {
		if err := f(); err != nil {
			return err
		}
	}

	return nil
}

// scanLine advances to the next pkt-line, mapping a clean EOF to ErrEmpty.
func (d *updReqDecoder) scanLine() error {
	if ok := d.s.Scan(); !ok {
		return d.scanErrorOr(ErrEmpty)
	}

	return nil
}

// decodeShallow consumes an optional leading "shallow <hash>" line.
func (d *updReqDecoder) decodeShallow() error {
	b := d.s.Bytes()

	if !bytes.HasPrefix(b, shallowNoSp) {
		return nil
	}

	if len(b) != shallowLineLength {
		return errInvalidShallowLineLength(len(b))
	}

	h, err := parseHash(string(b[len(shallow):]))
	if err != nil {
		return errInvalidShallowObjId(err)
	}

	if ok := d.s.Scan(); !ok {
		return d.scanErrorOr(errNoCommands)
	}

	d.req.Shallow = &h

	return nil
}

// decodeCommands reads update commands until the flush-pkt that precedes the
// packfile.
func (d *updReqDecoder) decodeCommands() error {
	for {
		b := d.s.Bytes()
		if bytes.Equal(b, pktline.Flush) {
			return nil
		}

		c, err := parseCommand(b)
		if err != nil {
			return err
		}

		d.req.Commands = append(d.req.Commands, c)

		if ok := d.s.Scan(); !ok {
			return d.s.Err()
		}
	}
}

// decodeCommandAndCapabilities parses the first command line, which carries
// the capabilities list after a NUL delimiter.
func (d *updReqDecoder) decodeCommandAndCapabilities() error {
	b := d.s.Bytes()
	i := bytes.IndexByte(b, 0)
	if i == -1 {
		return errMissingCapabilitiesDelimiter
	}

	if len(b) < minCommandAndCapsLength {
		return errInvalidCommandCapabilitiesLineLength(len(b))
	}

	cmd, err := parseCommand(b[:i])
	if err != nil {
		return err
	}

	d.req.Commands = append(d.req.Commands, cmd)

	if err := d.req.Capabilities.Decode(b[i+1:]); err != nil {
		return err
	}

	if err := d.scanLine(); err != nil {
		return err
	}

	return nil
}

// setPackfile hands the remainder of the stream to the request as the
// packfile body.
func (d *updReqDecoder) setPackfile() error {
	d.req.Packfile = d.r

	return nil
}

// parseCommand parses "<old-hash> <new-hash> <ref-name>" into a Command.
func parseCommand(b []byte) (*Command, error) {
	if len(b) < minCommandLength {
		return nil, errInvalidCommandLineLength(len(b))
	}

	var (
		os, ns string
		n      plumbing.ReferenceName
	)
	if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil {
		return nil, errMalformedCommand(err)
	}

	oh, err := parseHash(os)
	if err != nil {
		return nil, errInvalidOldObjId(err)
	}

	nh, err := parseHash(ns)
	if err != nil {
		return nil, errInvalidNewObjId(err)
	}

	return &Command{Old: oh, New: nh, Name: n}, nil
}

// parseHash validates s as a full hex object id and converts it to a Hash.
func parseHash(s string) (plumbing.Hash, error) {
	if len(s) != hashSize {
		return plumbing.ZeroHash, errInvalidHashSize(len(s))
	}

	if _, err := hex.DecodeString(s); err != nil {
		return plumbing.ZeroHash, errInvalidHash(err)
	}

	h := plumbing.NewHash(s)
	return h, nil
}

// scanErrorOr returns the scanner's error if it has one, or origErr otherwise.
func (d *updReqDecoder) scanErrorOr(origErr error) error {
	if err := d.s.Err(); err != nil {
		return err
	}

	return origErr
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/gitproto.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/gitproto.go
package packp

import (
	"fmt"
	"io"
	"strings"

	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

var (
	// ErrInvalidGitProtoRequest is returned by Decode if the input is not a
	// valid git protocol request.
	ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
)

// GitProtoRequest is a command request for the git protocol.
// It is used to send the command, endpoint, and extra parameters to the
// remote.
// See https://git-scm.com/docs/pack-protocol#_git_transport
type GitProtoRequest struct {
	RequestCommand string
	Pathname       string

	// Optional
	Host string

	// Optional
	ExtraParams []string
}

// validate validates the request.
func (g *GitProtoRequest) validate() error {
	if g.RequestCommand == "" {
		return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
	}

	if g.Pathname == "" {
		return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
	}

	return nil
}

// Encode encodes the request into the writer.
func (g *GitProtoRequest) Encode(w io.Writer) error {
	if w == nil {
		return ErrNilWriter
	}

	if err := g.validate(); err != nil {
		return err
	}

	p := pktline.NewEncoder(w)
	// Layout: "<command> <pathname>\x00[host=<host>\x00][\x00<param>\x00...]".
	req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
	if host := g.Host; host != "" {
		req += fmt.Sprintf("host=%s\x00", host)
	}

	if len(g.ExtraParams) > 0 {
		// Extra params are introduced by an additional NUL separator.
		req += "\x00"
		for _, param := range g.ExtraParams {
			req += param + "\x00"
		}
	}

	if err := p.Encode([]byte(req)); err != nil {
		return err
	}

	return nil
}

// Decode decodes the request from the reader.
func (g *GitProtoRequest) Decode(r io.Reader) error {
	s := pktline.NewScanner(r)
	if !s.Scan() {
		err := s.Err()
		if err == nil {
			return ErrInvalidGitProtoRequest
		}
		return err
	}

	line := string(s.Bytes())
	if len(line) == 0 {
		return io.EOF
	}

	// A well-formed request is NUL-terminated (see Encode above).
	if line[len(line)-1] != 0 {
		return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
	}

	parts := strings.SplitN(line, " ", 2)
	if len(parts) != 2 {
		return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
	}

	g.RequestCommand = parts[0]
	params := strings.Split(parts[1], string(null))
	if len(params) < 1 {
		return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
	}

	g.Pathname = params[0]
	if len(params) > 1 {
		g.Host = strings.TrimPrefix(params[1], "host=")
	}

	if len(params) > 2 {
		for _, param := range params[2:] {
			if param != "" {
				g.ExtraParams = append(g.ExtraParams, param)
			}
		}
	}

	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq_encode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq_encode.go
package packp import ( "bytes" "fmt" "io" "time" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/format/pktline" ) // Encode writes the UlReq encoding of u to the stream. // // All the payloads will end with a newline character. Wants and // shallows are sorted alphabetically. A depth of 0 means no depth // request is sent. func (req *UploadRequest) Encode(w io.Writer) error { e := newUlReqEncoder(w) return e.Encode(req) } type ulReqEncoder struct { pe *pktline.Encoder // where to write the encoded data data *UploadRequest // the data to encode err error // sticky error } func newUlReqEncoder(w io.Writer) *ulReqEncoder { return &ulReqEncoder{ pe: pktline.NewEncoder(w), } } func (e *ulReqEncoder) Encode(v *UploadRequest) error { e.data = v if len(v.Wants) == 0 { return fmt.Errorf("empty wants provided") } plumbing.HashesSort(e.data.Wants) for state := e.encodeFirstWant; state != nil; { state = state() } return e.err } func (e *ulReqEncoder) encodeFirstWant() stateFn { var err error if e.data.Capabilities.IsEmpty() { err = e.pe.Encodef("want %s\n", e.data.Wants[0]) } else { err = e.pe.Encodef( "want %s %s\n", e.data.Wants[0], e.data.Capabilities.String(), ) } if err != nil { e.err = fmt.Errorf("encoding first want line: %s", err) return nil } return e.encodeAdditionalWants } func (e *ulReqEncoder) encodeAdditionalWants() stateFn { last := e.data.Wants[0] for _, w := range e.data.Wants[1:] { if bytes.Equal(last[:], w[:]) { continue } if err := e.pe.Encodef("want %s\n", w); err != nil { e.err = fmt.Errorf("encoding want %q: %s", w, err) return nil } last = w } return e.encodeShallows } func (e *ulReqEncoder) encodeShallows() stateFn { plumbing.HashesSort(e.data.Shallows) var last plumbing.Hash for _, s := range e.data.Shallows { if bytes.Equal(last[:], s[:]) { continue } if err := e.pe.Encodef("shallow %s\n", s); err != nil { e.err = fmt.Errorf("encoding shallow %q: %s", s, err) return nil } last = s } return e.encodeDepth } 
// encodeDepth writes the depth request, if any: "deepen <n>" for a
// commit count, "deepen-since <unix-seconds>" for a time cutoff, or
// "deepen-not <ref>" to exclude a reference. A DepthCommits of 0
// means no depth line is emitted.
func (e *ulReqEncoder) encodeDepth() stateFn {
	switch depth := e.data.Depth.(type) {
	case DepthCommits:
		if depth != 0 {
			commits := int(depth)
			if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
				e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
				return nil
			}
		}
	case DepthSince:
		// The wire format carries the cutoff as UTC Unix seconds.
		when := time.Time(depth).UTC()
		if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
			e.err = fmt.Errorf("encoding depth %s: %s", when, err)
			return nil
		}
	case DepthReference:
		reference := string(depth)
		if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
			e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
			return nil
		}
	default:
		e.err = fmt.Errorf("unsupported depth type")
		return nil
	}

	return e.encodeFilter
}

// encodeFilter writes the partial-clone filter spec, when one is set.
func (e *ulReqEncoder) encodeFilter() stateFn {
	if filter := e.data.Filter; filter != "" {
		if err := e.pe.Encodef("filter %s\n", filter); err != nil {
			e.err = fmt.Errorf("encoding filter %s: %s", filter, err)
			return nil
		}
	}

	return e.encodeFlush
}

// encodeFlush terminates the request with a flush-pkt and ends the
// state machine.
func (e *ulReqEncoder) encodeFlush() stateFn {
	if err := e.pe.Flush(); err != nil {
		e.err = fmt.Errorf("encoding flush-pkt: %s", err)
		return nil
	}

	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq.go
package packp

import (
	"fmt"
	"time"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

// UploadRequest values represent the information transmitted on a
// upload-request message. Values from this type are not zero-value
// safe, use the New function instead.
// This is a low level type, use UploadPackRequest instead.
type UploadRequest struct {
	Capabilities *capability.List
	Wants        []plumbing.Hash
	Shallows     []plumbing.Hash
	Depth        Depth
	Filter       Filter
}

// Depth values stores the desired depth of the requested packfile: see
// DepthCommits, DepthSince and DepthReference.
type Depth interface {
	isDepth()
	IsZero() bool
}

// DepthCommits values stores the maximum number of requested commits in
// the packfile. Zero means infinite. A negative value will have
// undefined consequences.
type DepthCommits int

func (d DepthCommits) isDepth() {}

func (d DepthCommits) IsZero() bool {
	return d == 0
}

// DepthSince values requests only commits newer than the specified time.
type DepthSince time.Time

func (d DepthSince) isDepth() {}

func (d DepthSince) IsZero() bool {
	return time.Time(d).IsZero()
}

// DepthReference requests only commits not found in the specified reference.
type DepthReference string

func (d DepthReference) isDepth() {}

func (d DepthReference) IsZero() bool {
	return string(d) == ""
}

// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be
// used. It has no capabilities, wants or shallows and an infinite depth. Please
// note that to encode an upload-request it has to have at least one wanted hash.
func NewUploadRequest() *UploadRequest {
	return &UploadRequest{
		Capabilities: capability.NewList(),
		Wants:        []plumbing.Hash{},
		Shallows:     []plumbing.Hash{},
		Depth:        DepthCommits(0),
	}
}

// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
// value, the request capabilities are filled with the most optimal ones, based
// on the adv value (advertised capabilities), the UploadRequest generated it
// has no wants or shallows and an infinite depth.
func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
	r := NewUploadRequest()

	// Prefer the richer variant of each mutually-exclusive pair when
	// the server advertises it.
	if adv.Supports(capability.MultiACKDetailed) {
		r.Capabilities.Set(capability.MultiACKDetailed)
	} else if adv.Supports(capability.MultiACK) {
		r.Capabilities.Set(capability.MultiACK)
	}

	if adv.Supports(capability.Sideband64k) {
		r.Capabilities.Set(capability.Sideband64k)
	} else if adv.Supports(capability.Sideband) {
		r.Capabilities.Set(capability.Sideband)
	}

	if adv.Supports(capability.ThinPack) {
		r.Capabilities.Set(capability.ThinPack)
	}

	if adv.Supports(capability.OFSDelta) {
		r.Capabilities.Set(capability.OFSDelta)
	}

	if adv.Supports(capability.Agent) {
		r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
	}

	return r
}

// Validate validates the content of UploadRequest, following the next rules:
//   - Wants MUST have at least one reference
//   - capability.Shallow MUST be present if Shallows is not empty
//   - if a non-zero DepthCommits is given, capability.Shallow MUST be present
//   - if a DepthSince is given, capability.DeepenSince MUST be present
//   - if a DepthReference is given, capability.DeepenNot MUST be present
//   - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k
//   - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed
func (req *UploadRequest) Validate() error {
	if len(req.Wants) == 0 {
		return fmt.Errorf("want can't be empty")
	}

	if err := req.validateRequiredCapabilities(); err != nil {
		return err
	}

	if err := req.validateConflictCapabilities(); err != nil {
		return err
	}

	return nil
}

// validateRequiredCapabilities checks that every depth/shallow feature
// used by the request is backed by the corresponding capability.
func (req *UploadRequest) validateRequiredCapabilities() error {
	msg := "missing capability %s"

	if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) {
		return fmt.Errorf(msg, capability.Shallow)
	}

	switch req.Depth.(type) {
	case DepthCommits:
		if req.Depth != DepthCommits(0) {
			if !req.Capabilities.Supports(capability.Shallow) {
				return fmt.Errorf(msg, capability.Shallow)
			}
		}
	case DepthSince:
		if !req.Capabilities.Supports(capability.DeepenSince) {
			return fmt.Errorf(msg, capability.DeepenSince)
		}
	case DepthReference:
		if !req.Capabilities.Supports(capability.DeepenNot) {
			return fmt.Errorf(msg, capability.DeepenNot)
		}
	}

	return nil
}

// validateConflictCapabilities rejects requests carrying both members
// of a mutually-exclusive capability pair.
func (req *UploadRequest) validateConflictCapabilities() error {
	msg := "capabilities %s and %s are mutually exclusive"

	if req.Capabilities.Supports(capability.Sideband) &&
		req.Capabilities.Supports(capability.Sideband64k) {
		return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
	}

	if req.Capabilities.Supports(capability.MultiACK) &&
		req.Capabilities.Supports(capability.MultiACKDetailed) {
		return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
	}

	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/report_status.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/report_status.go
package packp

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

const (
	// ok is the wire token denoting success for both the unpack line
	// and per-command statuses.
	ok = "ok"
)

// ReportStatus is a report status message, as used in the git-receive-pack
// process whenever the 'report-status' capability is negotiated.
type ReportStatus struct {
	UnpackStatus    string
	CommandStatuses []*CommandStatus
}

// NewReportStatus creates a new ReportStatus message.
func NewReportStatus() *ReportStatus {
	return &ReportStatus{}
}

// Error returns the first error if any: a failed unpack takes
// precedence over any individual command failure.
func (s *ReportStatus) Error() error {
	if s.UnpackStatus != ok {
		return fmt.Errorf("unpack error: %s", s.UnpackStatus)
	}

	for _, s := range s.CommandStatuses {
		if err := s.Error(); err != nil {
			return err
		}
	}

	return nil
}

// Encode writes the report status to a writer: one "unpack" pkt-line,
// one pkt-line per command status, then a flush-pkt.
func (s *ReportStatus) Encode(w io.Writer) error {
	e := pktline.NewEncoder(w)
	if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil {
		return err
	}

	for _, cs := range s.CommandStatuses {
		if err := cs.encode(w); err != nil {
			return err
		}
	}

	return e.Flush()
}

// Decode reads from the given reader and decodes a report-status message. It
// does not read more input than what is needed to fill the report status.
func (s *ReportStatus) Decode(r io.Reader) error {
	scan := pktline.NewScanner(r)
	if err := s.scanFirstLine(scan); err != nil {
		return err
	}

	if err := s.decodeReportStatus(scan.Bytes()); err != nil {
		return err
	}

	// Consume command-status lines until the terminating flush-pkt;
	// a missing flush means the message was truncated.
	flushed := false
	for scan.Scan() {
		b := scan.Bytes()
		if isFlush(b) {
			flushed = true
			break
		}

		if err := s.decodeCommandStatus(b); err != nil {
			return err
		}
	}

	if !flushed {
		return fmt.Errorf("missing flush")
	}

	return scan.Err()
}

// scanFirstLine advances the scanner to the first pkt-line, mapping a
// clean EOF to io.ErrUnexpectedEOF since at least an unpack line is
// required.
func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error {
	if scan.Scan() {
		return nil
	}

	if scan.Err() != nil {
		return scan.Err()
	}

	return io.ErrUnexpectedEOF
}

// decodeReportStatus parses the leading "unpack <status>" line.
func (s *ReportStatus) decodeReportStatus(b []byte) error {
	if isFlush(b) {
		return fmt.Errorf("premature flush")
	}

	b = bytes.TrimSuffix(b, eol)

	line := string(b)
	fields := strings.SplitN(line, " ", 2)
	if len(fields) != 2 || fields[0] != "unpack" {
		return fmt.Errorf("malformed unpack status: %s", line)
	}

	s.UnpackStatus = fields[1]

	return nil
}

// decodeCommandStatus parses either "ok <ref>" or "ng <ref> <reason>"
// and appends the result to CommandStatuses.
func (s *ReportStatus) decodeCommandStatus(b []byte) error {
	b = bytes.TrimSuffix(b, eol)

	line := string(b)
	fields := strings.SplitN(line, " ", 3)
	status := ok
	if len(fields) == 3 && fields[0] == "ng" {
		status = fields[2]
	} else if len(fields) != 2 || fields[0] != "ok" {
		return fmt.Errorf("malformed command status: %s", line)
	}

	cs := &CommandStatus{
		ReferenceName: plumbing.ReferenceName(fields[1]),
		Status:        status,
	}
	s.CommandStatuses = append(s.CommandStatuses, cs)

	return nil
}

// CommandStatus is the status of a reference in a report status.
// See ReportStatus struct.
type CommandStatus struct {
	ReferenceName plumbing.ReferenceName
	// Status holds "ok" on success, or the server-provided reason on
	// failure.
	Status string
}

// Error returns the error, if any.
func (s *CommandStatus) Error() error { if s.Status == ok { return nil } return fmt.Errorf("command error on %s: %s", s.ReferenceName.String(), s.Status) } func (s *CommandStatus) encode(w io.Writer) error { e := pktline.NewEncoder(w) if s.Error() == nil { return e.Encodef("ok %s\n", s.ReferenceName.String()) } return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq_encode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq_encode.go
package packp

import (
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

// Encode writes the ReferenceUpdateRequest encoding to the stream:
// optional shallow line, the command list (capabilities on the first
// command), optional push options, and finally the packfile, if any.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
	if err := req.validate(); err != nil {
		return err
	}

	e := pktline.NewEncoder(w)

	if err := req.encodeShallow(e, req.Shallow); err != nil {
		return err
	}

	if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil {
		return err
	}

	// Push options are only understood when the capability was
	// negotiated.
	if req.Capabilities.Supports(capability.PushOptions) {
		if err := req.encodeOptions(e, req.Options); err != nil {
			return err
		}
	}

	if req.Packfile != nil {
		if _, err := io.Copy(w, req.Packfile); err != nil {
			return err
		}

		return req.Packfile.Close()
	}

	return nil
}

// encodeShallow writes a "shallow <hash>" line, or nothing when no
// shallow hash is set.
func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
	h *plumbing.Hash) error {
	if h == nil {
		return nil
	}

	objId := []byte(h.String())
	return e.Encodef("%s%s", shallow, objId)
}

// encodeCommands writes one pkt-line per command; the capability list
// is appended to the first command after a NUL byte, then the section
// is terminated with a flush-pkt.
func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
	cmds []*Command, cap *capability.List) error {
	if err := e.Encodef("%s\x00%s",
		formatCommand(cmds[0]), cap.String()); err != nil {
		return err
	}

	for _, cmd := range cmds[1:] {
		// Encode the formatted command as a value, not as a format
		// string: a '%' in a reference name must not be interpreted
		// as a fmt verb.
		if err := e.Encodef("%s", formatCommand(cmd)); err != nil {
			return err
		}
	}

	return e.Flush()
}

// formatCommand renders a command as "<old-hash> <new-hash> <refname>".
func formatCommand(cmd *Command) string {
	o := cmd.Old.String()
	n := cmd.New.String()
	return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}

// encodeOptions writes one "key=value" pkt-line per push option,
// followed by a flush-pkt.
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
	opts []*Option) error {
	for _, opt := range opts {
		if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
			return err
		}
	}

	return e.Flush()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/updreq.go
package packp

import (
	"errors"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband"
)

var (
	ErrEmptyCommands    = errors.New("commands cannot be empty")
	ErrMalformedCommand = errors.New("malformed command")
)

// ReferenceUpdateRequest values represent reference upload requests.
// Values from this type are not zero-value safe, use the New function instead.
type ReferenceUpdateRequest struct {
	Capabilities *capability.List
	Commands     []*Command
	Options      []*Option
	Shallow      *plumbing.Hash
	// Packfile contains an optional packfile reader.
	Packfile io.ReadCloser

	// Progress receives sideband progress messages from the server
	Progress sideband.Progress
}

// NewReferenceUpdateRequest returns a pointer to a new
// ReferenceUpdateRequest value with an empty capability list and no
// commands.
func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
	return &ReferenceUpdateRequest{
		// TODO: Add support for push-cert
		Capabilities: capability.NewList(),
		Commands:     nil,
	}
}

// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new
// ReferenceUpdateRequest value, the request capabilities are filled with the
// most optimal ones, based on the adv value (advertised capabilities), the
// ReferenceUpdateRequest contains no commands.
//
// It sets agent and report-status when the server advertises them.
// It leaves up to the user to add the following capabilities later:
//   - atomic
//   - ofs-delta
//   - side-band
//   - side-band-64k
//   - quiet
//   - push-cert
func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest {
	r := NewReferenceUpdateRequest()

	if adv.Supports(capability.Agent) {
		r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
	}

	if adv.Supports(capability.ReportStatus) {
		r.Capabilities.Set(capability.ReportStatus)
	}

	return r
}

// validate rejects requests with no commands or with any malformed
// command (both hashes zero).
func (req *ReferenceUpdateRequest) validate() error {
	if len(req.Commands) == 0 {
		return ErrEmptyCommands
	}

	for _, c := range req.Commands {
		if err := c.validate(); err != nil {
			return err
		}
	}

	return nil
}

// Action describes what a Command does to its reference.
type Action string

const (
	Create  Action = "create"
	Update  Action = "update"
	Delete  Action = "delete"
	Invalid Action = "invalid"
)

// Command describes a single reference update: Old is the expected
// current hash, New the desired one. A zero Old means create; a zero
// New means delete.
type Command struct {
	Name plumbing.ReferenceName
	Old  plumbing.Hash
	New  plumbing.Hash
}

// Action derives the command kind from which of the two hashes are
// zero; both zero is Invalid.
func (c *Command) Action() Action {
	if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash {
		return Invalid
	}

	if c.Old == plumbing.ZeroHash {
		return Create
	}

	if c.New == plumbing.ZeroHash {
		return Delete
	}

	return Update
}

// validate returns ErrMalformedCommand for commands whose Action is
// Invalid.
func (c *Command) validate() error {
	if c.Action() == Invalid {
		return ErrMalformedCommand
	}

	return nil
}

// Option is a push option transmitted as "key=value".
type Option struct {
	Key   string
	Value string
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
package packp

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

// Decode reads the next advertised-refs message form its input and
// stores it in the AdvRefs.
func (a *AdvRefs) Decode(r io.Reader) error {
	d := newAdvRefsDecoder(r)
	return d.Decode(a)
}

// advRefsDecoder parses an advertised-refs message with a state
// machine; each decoder state consumes pkt-lines and returns the next
// state, or nil when done (or on error).
type advRefsDecoder struct {
	s     *pktline.Scanner // a pkt-line scanner from the input stream
	line  []byte           // current pkt-line contents, use parser.nextLine() to make it advance
	nLine int              // current pkt-line number for debugging, begins at 1
	hash  plumbing.Hash    // last hash read
	err   error            // sticky error, use the parser.error() method to fill this out
	data  *AdvRefs         // parsed data is stored here
}

var (
	// ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
	// references message.
	ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
	// ErrEmptyInput is returned by Decode if the input is empty.
	ErrEmptyInput = errors.New("empty input")
)

// newAdvRefsDecoder returns a decoder reading pkt-lines from r.
func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
	return &advRefsDecoder{
		s: pktline.NewScanner(r),
	}
}

// Decode runs the decoder state machine, filling v, and returns the
// sticky error, if any.
func (d *advRefsDecoder) Decode(v *AdvRefs) error {
	d.data = v

	for state := decodePrefix; state != nil; {
		state = state(d)
	}

	return d.err
}

type decoderStateFn func(*advRefsDecoder) decoderStateFn

// fills out the parser sticky error
func (d *advRefsDecoder) error(format string, a ...interface{}) {
	msg := fmt.Sprintf(
		"pkt-line %d: %s", d.nLine,
		fmt.Sprintf(format, a...),
	)

	d.err = NewErrUnexpectedData(msg, d.line)
}

// Reads a new pkt-line from the scanner, makes its payload available as
// p.line and increments p.nLine. A successful invocation returns true,
// otherwise, false is returned and the sticky error is filled out
// accordingly. Trims eols at the end of the payloads.
func (d *advRefsDecoder) nextLine() bool {
	d.nLine++

	if !d.s.Scan() {
		if d.err = d.s.Err(); d.err != nil {
			return false
		}

		// A clean EOF before any line was read means the input was
		// entirely empty.
		if d.nLine == 1 {
			d.err = ErrEmptyInput
			return false
		}

		d.error("EOF")
		return false
	}

	d.line = d.s.Bytes()
	d.line = bytes.TrimSuffix(d.line, eol)

	return true
}

// The HTTP smart prefix is often followed by a flush-pkt.
func decodePrefix(d *advRefsDecoder) decoderStateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if !isPrefix(d.line) {
		return decodeFirstHash
	}

	// Copy the payload: d.line is backed by the scanner's buffer and
	// will be overwritten by the next read.
	tmp := make([]byte, len(d.line))
	copy(tmp, d.line)
	d.data.Prefix = append(d.data.Prefix, tmp)
	if ok := d.nextLine(); !ok {
		return nil
	}

	if !isFlush(d.line) {
		return decodeFirstHash
	}

	d.data.Prefix = append(d.data.Prefix, pktline.Flush)
	if ok := d.nextLine(); !ok {
		return nil
	}

	return decodeFirstHash
}

// isPrefix reports whether payload is a smart-HTTP service comment
// line (starts with '#').
func isPrefix(payload []byte) bool {
	return len(payload) > 0 && payload[0] == '#'
}

// If the first hash is zero, then a no-refs is coming. Otherwise, a
// list-of-refs is coming, and the hash will be followed by the first
// advertised ref.
func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
	// If the repository is empty, we receive a flush here (HTTP).
	if isFlush(p.line) {
		p.err = ErrEmptyAdvRefs
		return nil
	}

	// TODO: Use object-format (when available) for hash size. Git 2.41+
	if len(p.line) < hashSize {
		p.error("cannot read hash, pkt-line too short")
		return nil
	}

	if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
		p.error("invalid hash text: %s", err)
		return nil
	}

	p.line = p.line[hashSize:]

	if p.hash.IsZero() {
		return decodeSkipNoRefs
	}

	return decodeFirstRef
}

// Skips SP "capabilities^{}" NUL
func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
	if len(p.line) < len(noHeadMark) {
		p.error("too short zero-id ref")
		return nil
	}

	if !bytes.HasPrefix(p.line, noHeadMark) {
		p.error("malformed zero-id ref")
		return nil
	}

	p.line = p.line[len(noHeadMark):]

	return decodeCaps
}

// decode the refname, expects SP refname NULL
func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
	if len(l.line) < 3 {
		l.error("line too short after hash")
		return nil
	}

	if !bytes.HasPrefix(l.line, sp) {
		l.error("no space after hash")
		return nil
	}
	l.line = l.line[1:]

	chunks := bytes.SplitN(l.line, null, 2)
	if len(chunks) < 2 {
		l.error("NULL not found")
		return nil
	}
	ref := chunks[0]
	l.line = chunks[1]

	if bytes.Equal(ref, []byte(head)) {
		l.data.Head = &l.hash
	} else {
		l.data.References[string(ref)] = l.hash
	}

	return decodeCaps
}

// decodeCaps parses the capability list that follows the first ref.
func decodeCaps(p *advRefsDecoder) decoderStateFn {
	if err := p.data.Capabilities.Decode(p.line); err != nil {
		p.error("invalid capabilities: %s", err)
		return nil
	}

	return decodeOtherRefs
}

// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}).
// If there are no refs, then there might be a shallow or flush-pkt.
func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
	if ok := p.nextLine(); !ok {
		return nil
	}

	if bytes.HasPrefix(p.line, shallow) {
		return decodeShallow
	}

	// An empty payload is the flush-pkt terminating the message.
	if len(p.line) == 0 {
		return nil
	}

	// Peeled refs ("<ref>^{}") go into Peeled instead of References.
	saveTo := p.data.References
	if bytes.HasSuffix(p.line, peeled) {
		p.line = bytes.TrimSuffix(p.line, peeled)
		saveTo = p.data.Peeled
	}

	ref, hash, err := readRef(p.line)
	if err != nil {
		p.error("%s", err)
		return nil
	}
	saveTo[ref] = hash

	return decodeOtherRefs
}

// Reads a ref-name
func readRef(data []byte) (string, plumbing.Hash, error) {
	chunks := bytes.Split(data, sp)
	switch {
	case len(chunks) == 1:
		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
	case len(chunks) > 2:
		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
	default:
		return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
	}
}

// Keeps reading shallows until a flush-pkt is found
func decodeShallow(p *advRefsDecoder) decoderStateFn {
	if !bytes.HasPrefix(p.line, shallow) {
		p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
		return nil
	}
	p.line = bytes.TrimPrefix(p.line, shallow)

	if len(p.line) != hashSize {
		p.error(fmt.Sprintf(
			"malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
			len(p.line)))
		return nil
	}

	text := p.line[:hashSize]
	var h plumbing.Hash
	if _, err := hex.Decode(h[:], text); err != nil {
		p.error("invalid hash text: %s", err)
		return nil
	}

	p.data.Shallows = append(p.data.Shallows, h)

	if ok := p.nextLine(); !ok {
		return nil
	}

	if len(p.line) == 0 {
		return nil // successful parse of the advertised-refs message
	}

	return decodeShallow
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
package packp

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"time"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

// Decode reads the next upload-request form its input and
// stores it in the UploadRequest.
func (req *UploadRequest) Decode(r io.Reader) error {
	d := newUlReqDecoder(r)
	return d.Decode(req)
}

// ulReqDecoder parses an upload-request message with a state machine;
// each decoder state consumes pkt-lines and returns the next state, or
// nil when done (or on error).
type ulReqDecoder struct {
	s     *pktline.Scanner // a pkt-line scanner from the input stream
	line  []byte           // current pkt-line contents, use parser.nextLine() to make it advance
	nLine int              // current pkt-line number for debugging, begins at 1
	err   error            // sticky error, use the parser.error() method to fill this out
	data  *UploadRequest   // parsed data is stored here
}

// newUlReqDecoder returns a decoder reading pkt-lines from r.
func newUlReqDecoder(r io.Reader) *ulReqDecoder {
	return &ulReqDecoder{
		s: pktline.NewScanner(r),
	}
}

// Decode runs the decoder state machine, filling v, and returns the
// sticky error, if any.
func (d *ulReqDecoder) Decode(v *UploadRequest) error {
	d.data = v

	for state := d.decodeFirstWant; state != nil; {
		state = state()
	}

	return d.err
}

// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
	msg := fmt.Sprintf(
		"pkt-line %d: %s", d.nLine,
		fmt.Sprintf(format, a...),
	)

	d.err = NewErrUnexpectedData(msg, d.line)
}

// Reads a new pkt-line from the scanner, makes its payload available as
// p.line and increments p.nLine. A successful invocation returns true,
// otherwise, false is returned and the sticky error is filled out
// accordingly. Trims eols at the end of the payloads.
func (d *ulReqDecoder) nextLine() bool {
	d.nLine++

	if !d.s.Scan() {
		if d.err = d.s.Err(); d.err != nil {
			return false
		}

		d.error("EOF")
		return false
	}

	d.line = d.s.Bytes()
	d.line = bytes.TrimSuffix(d.line, eol)

	return true
}

// Expected format: want <hash>[ capabilities]
func (d *ulReqDecoder) decodeFirstWant() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if !bytes.HasPrefix(d.line, want) {
		d.error("missing 'want ' prefix")
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, want)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Wants = append(d.data.Wants, hash)

	return d.decodeCaps
}

// readHash consumes a hex-encoded hash from the front of d.line,
// leaving the remainder in d.line. On failure it fills the sticky
// error and returns false.
func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
	if len(d.line) < hashSize {
		d.err = fmt.Errorf("malformed hash: %v", d.line)
		return plumbing.ZeroHash, false
	}

	var hash plumbing.Hash
	if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
		d.error("invalid hash text: %s", err)
		return plumbing.ZeroHash, false
	}
	d.line = d.line[hashSize:]

	return hash, true
}

// Expected format: sp cap1 sp cap2 sp cap3...
func (d *ulReqDecoder) decodeCaps() stateFn {
	d.line = bytes.TrimPrefix(d.line, sp)
	if err := d.data.Capabilities.Decode(d.line); err != nil {
		d.error("invalid capabilities: %s", err)
	}

	return d.decodeOtherWants
}

// Expected format: want <hash>
func (d *ulReqDecoder) decodeOtherWants() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if bytes.HasPrefix(d.line, shallow) {
		return d.decodeShallow
	}

	if bytes.HasPrefix(d.line, deepen) {
		return d.decodeDeepen
	}

	// An empty payload is the flush-pkt terminating the want list.
	if len(d.line) == 0 {
		return nil
	}

	if !bytes.HasPrefix(d.line, want) {
		d.error("unexpected payload while expecting a want: %q", d.line)
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, want)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Wants = append(d.data.Wants, hash)

	return d.decodeOtherWants
}

// Expected format: shallow <hash>
func (d *ulReqDecoder) decodeShallow() stateFn {
	if bytes.HasPrefix(d.line, deepen) {
		return d.decodeDeepen
	}

	if len(d.line) == 0 {
		return nil
	}

	if !bytes.HasPrefix(d.line, shallow) {
		d.error("unexpected payload while expecting a shallow: %q", d.line)
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, shallow)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Shallows = append(d.data.Shallows, hash)

	if ok := d.nextLine(); !ok {
		return nil
	}

	return d.decodeShallow
}

// Expected format: deepen <n> / deepen-since <ul> / deepen-not <ref>
func (d *ulReqDecoder) decodeDeepen() stateFn {
	if bytes.HasPrefix(d.line, deepenCommits) {
		return d.decodeDeepenCommits
	}

	if bytes.HasPrefix(d.line, deepenSince) {
		return d.decodeDeepenSince
	}

	if bytes.HasPrefix(d.line, deepenReference) {
		return d.decodeDeepenReference
	}

	if len(d.line) == 0 {
		return nil
	}

	d.error("unexpected deepen specification: %q", d.line)
	return nil
}

// decodeDeepenCommits parses "deepen <n>" into a DepthCommits value;
// negative depths are rejected.
func (d *ulReqDecoder) decodeDeepenCommits() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenCommits)

	var n int
	if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
		return nil
	}
	if n < 0 {
		d.err = fmt.Errorf("negative depth")
		return nil
	}
	d.data.Depth = DepthCommits(n)

	return d.decodeFlush
}

// decodeDeepenSince parses "deepen-since <unix-seconds>" into a
// DepthSince value (UTC).
func (d *ulReqDecoder) decodeDeepenSince() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenSince)

	var secs int64
	secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
	if d.err != nil {
		return nil
	}
	t := time.Unix(secs, 0).UTC()
	d.data.Depth = DepthSince(t)

	return d.decodeFlush
}

// decodeDeepenReference parses "deepen-not <ref>" into a
// DepthReference value.
func (d *ulReqDecoder) decodeDeepenReference() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenReference)

	d.data.Depth = DepthReference(string(d.line))

	return d.decodeFlush
}

// decodeFlush expects the terminating flush-pkt and rejects any
// trailing payload.
func (d *ulReqDecoder) decodeFlush() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if len(d.line) != 0 {
		d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
	}

	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/doc.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/doc.go
package packp /* A nice way to trace the real data transmitted and received by git, use: GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git Here follows a copy of the current protocol specification at the time of this writing. (Please notice that most http git servers will add a flush-pkt after the first pkt-line when using HTTP smart.) Documentation Common to Pack and Http Protocols =============================================== ABNF Notation ------------- ABNF notation as described by RFC 5234 is used within the protocol documents, except the following replacement core rules are used: ---- HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" ---- We also define the following common rules: ---- NUL = %x00 zero-id = 40*"0" obj-id = 40*(HEXDIGIT) refname = "HEAD" refname /= "refs/" <see discussion below> ---- A refname is a hierarchical octet string beginning with "refs/" and not violating the 'git-check-ref-format' command's validation rules. More specifically, they: . They can include slash `/` for hierarchical (directory) grouping, but no slash-separated component can begin with a dot `.`. . They must contain at least one `/`. This enforces the presence of a category like `heads/`, `tags/` etc. but the actual names are not restricted. . They cannot have two consecutive dots `..` anywhere. . They cannot have ASCII control characters (i.e. bytes whose values are lower than \040, or \177 `DEL`), space, tilde `~`, caret `^`, colon `:`, question-mark `?`, asterisk `*`, or open bracket `[` anywhere. . They cannot end with a slash `/` or a dot `.`. . They cannot end with the sequence `.lock`. . They cannot contain a sequence `@{`. . They cannot contain a `\\`. pkt-line Format --------------- Much (but not all) of the payload is described around pkt-lines. A pkt-line is a variable length binary string. 
The first four bytes of the line, the pkt-len, indicates the total length of the line, in hexadecimal. The pkt-len includes the 4 bytes used to contain the length's hexadecimal representation. A pkt-line MAY contain binary data, so implementors MUST ensure pkt-line parsing/formatting routines are 8-bit clean. A non-binary line SHOULD BE terminated by an LF, which if present MUST be included in the total length. Receivers MUST treat pkt-lines with non-binary data the same whether or not they contain the trailing LF (stripping the LF if present, and not complaining when it is missing). The maximum length of a pkt-line's data component is 65516 bytes. Implementations MUST NOT send pkt-line whose length exceeds 65520 (65516 bytes of payload + 4 bytes of length data). Implementations SHOULD NOT send an empty pkt-line ("0004"). A pkt-line with a length field of 0 ("0000"), called a flush-pkt, is a special case and MUST be handled differently than an empty pkt-line ("0004"). ---- pkt-line = data-pkt / flush-pkt data-pkt = pkt-len pkt-payload pkt-len = 4*(HEXDIG) pkt-payload = (pkt-len - 4)*(OCTET) flush-pkt = "0000" ---- Examples (as C-style strings): ---- pkt-line actual value --------------------------------- "0006a\n" "a\n" "0005a" "a" "000bfoobar\n" "foobar\n" "0004" "" ---- Packfile transfer protocols =========================== Git supports transferring data in packfiles over the ssh://, git://, http:// and file:// transports. There exist two sets of protocols, one for pushing data from a client to a server and another for fetching data from a server to a client. The three transports (ssh, git, file) use the same protocol to transfer data. http is documented in http-protocol.txt. The processes invoked in the canonical Git implementation are 'upload-pack' on the server side and 'fetch-pack' on the client side for fetching data; then 'receive-pack' on the server and 'send-pack' on the client for pushing data. 
The protocol functions to have a server tell a client what is currently on the server, then for the two to negotiate the smallest amount of data to send in order to fully update one or the other. pkt-line Format --------------- The descriptions below build on the pkt-line format described in protocol-common.txt. When the grammar indicates `PKT-LINE(...)`, unless otherwise noted the usual pkt-line LF rules apply: the sender SHOULD include a LF, but the receiver MUST NOT complain if it is not present. Transports ---------- There are three transports over which the packfile protocol is initiated. The Git transport is a simple, unauthenticated server that takes the command (almost always 'upload-pack', though Git servers can be configured to be globally writable, in which case 'receive- pack' initiation is also allowed) with which the client wishes to communicate and executes it and connects it to the requesting process. In the SSH transport, the client just runs the 'upload-pack' or 'receive-pack' process on the server over the SSH protocol and then communicates with that invoked process over the SSH connection. The file:// transport runs the 'upload-pack' or 'receive-pack' process locally and communicates with it over a pipe. Git Transport ------------- The Git transport starts off by sending the command and repository on the wire using the pkt-line format, followed by a NUL byte and a hostname parameter, terminated by a NUL byte. 0032git-upload-pack /project.git\0host=myserver.com\0 -- git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] request-command = "git-upload-pack" / "git-receive-pack" / "git-upload-archive" ; case sensitive pathname = *( %x01-ff ) ; exclude NUL host-parameter = "host=" hostname [ ":" port ] -- Only host-parameter is allowed in the git-proto-request. Clients MUST NOT attempt to send additional parameters. It is used for the git-daemon name based virtual hosting. 
See --interpolated-path option to git daemon, with the %H/%CH format characters. Basically what the Git client is doing to connect to an 'upload-pack' process on the server side over the Git protocol is this: $ echo -e -n \ "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 If the server refuses the request for some reason, it could abort gracefully with an error message. ---- error-line = PKT-LINE("ERR" SP explanation-text) ---- SSH Transport ------------- Initiating the upload-pack or receive-pack processes over SSH is executing the binary on the server via SSH remote execution. It is basically equivalent to running this: $ ssh git.example.com "git-upload-pack '/project.git'" For a server to support Git pushing and pulling for a given user over SSH, that user needs to be able to execute one or both of those commands via the SSH shell that they are provided on login. On some systems, that shell access is limited to only being able to run those two commands, or even just one of them. In an ssh:// format URI, it's absolute in the URI, so the '/' after the host name (or port number) is sent as an argument, which is then read by the remote git-upload-pack exactly as is, so it's effectively an absolute path in the remote filesystem. git clone ssh://user@example.com/project.git | v ssh user@example.com "git-upload-pack '/project.git'" In a "user@host:path" format URI, it's relative to the user's home directory, because the Git client will run: git clone user@example.com:project.git | v ssh user@example.com "git-upload-pack 'project.git'" The exception is if a '~' is used, in which case we execute it without the leading '/'. ssh://user@example.com/~alice/project.git, | v ssh user@example.com "git-upload-pack '~alice/project.git'" A few things to remember here: - The "command name" is spelled with dash (e.g. git-upload-pack), but this can be overridden by the client; - The repository path is always quoted with single quotes.
Fetching Data From a Server --------------------------- When one Git repository wants to get data that a second repository has, the first can 'fetch' from the second. This operation determines what data the server has that the client does not then streams that data down to the client in packfile format. Reference Discovery ------------------- When the client initially connects the server will immediately respond with a listing of each reference it has (all branches and tags) along with the object name that each reference currently points to. $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | nc -v example.com 9418 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow no-progress include-tag 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} 0000 The returned response is a pkt-line stream describing each ref and its current value. The stream MUST be sorted by name according to the C locale ordering. If HEAD is a valid ref, HEAD MUST appear as the first advertised ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the advertisement list at all, but other refs may still appear. The stream MUST include capability declarations behind a NUL on the first ref. The peeled value of a ref (that is "ref^{}") MUST be immediately after the ref itself, if presented. A conforming server MUST peel the ref if it's an annotated tag. 
---- advertised-refs = (no-refs / list-of-refs) *shallow flush-pkt no-refs = PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list) list-of-refs = first-ref *other-ref first-ref = PKT-LINE(obj-id SP refname NUL capability-list) other-ref = PKT-LINE(other-tip / other-peeled) other-tip = obj-id SP refname other-peeled = obj-id SP refname "^{}" shallow = PKT-LINE("shallow" SP obj-id) capability-list = capability *(SP capability) capability = 1*(LC_ALPHA / DIGIT / "-" / "_") LC_ALPHA = %x61-7A ---- Server and client MUST use lowercase for obj-id, both MUST treat obj-id as case-insensitive. See protocol-capabilities.txt for a list of allowed server capabilities and descriptions. Packfile Negotiation -------------------- After reference and capabilities discovery, the client can decide to terminate the connection by sending a flush-pkt, telling the server it can now gracefully terminate, and disconnect, when it does not need any pack data. This can happen with the ls-remote command, and also can happen when the client already is up-to-date. Otherwise, it enters the negotiation phase, where the client and server determine what the minimal packfile necessary for transport is, by telling the server what objects it wants, its shallow objects (if any), and the maximum commit depth it wants (if any). The client will also send a list of the capabilities it wants to be in effect, out of what the server said it could do with the first 'want' line. ---- upload-request = want-list *shallow-line *1depth-request flush-pkt want-list = first-want *additional-want shallow-line = PKT-LINE("shallow" SP obj-id) depth-request = PKT-LINE("deepen" SP depth) / PKT-LINE("deepen-since" SP timestamp) / PKT-LINE("deepen-not" SP ref) first-want = PKT-LINE("want" SP obj-id SP capability-list) additional-want = PKT-LINE("want" SP obj-id) depth = 1*DIGIT ---- Clients MUST send all the obj-ids it wants from the reference discovery phase as 'want' lines. 
Clients MUST send at least one 'want' command in the request body. Clients MUST NOT mention an obj-id in a 'want' command which did not appear in the response obtained through ref discovery. The client MUST write all obj-ids which it only has shallow copies of (meaning that it does not have the parents of a commit) as 'shallow' lines so that the server is aware of the limitations of the client's history. The client now sends the maximum commit history depth it wants for this transaction, which is the number of commits it wants from the tip of the history, if any, as a 'deepen' line. A depth of 0 is the same as not making a depth request. The client does not want to receive any commits beyond this depth, nor does it want objects needed only to complete those commits. Commits whose parents are not received as a result are defined as shallow and marked as such in the server. This information is sent back to the client in the next step. Once all the 'want's and 'shallow's (and optional 'deepen') are transferred, clients MUST send a flush-pkt, to tell the server side that it is done sending the list. Otherwise, if the client sent a positive depth request, the server will determine which commits will and will not be shallow and send this information to the client. If the client did not request a positive depth, this step is skipped. ---- shallow-update = *shallow-line *unshallow-line flush-pkt shallow-line = PKT-LINE("shallow" SP obj-id) unshallow-line = PKT-LINE("unshallow" SP obj-id) ---- If the client has requested a positive depth, the server will compute the set of commits which are no deeper than the desired depth. The set of commits start at the client's wants. The server writes 'shallow' lines for each commit whose parents will not be sent as a result. The server writes an 'unshallow' line for each commit which the client has indicated is shallow, but is no longer shallow at the currently requested depth (that is, its parents will now be sent). 
The server MUST NOT mark as unshallow anything which the client has not indicated was shallow. Now the client will send a list of the obj-ids it has using 'have' lines, so the server can make a packfile that only contains the objects that the client needs. In multi_ack mode, the canonical implementation will send up to 32 of these at a time, then will send a flush-pkt. The canonical implementation will skip ahead and send the next 32 immediately, so that there is always a block of 32 "in-flight on the wire" at a time. ---- upload-haves = have-list compute-end have-list = *have-line have-line = PKT-LINE("have" SP obj-id) compute-end = flush-pkt / PKT-LINE("done") ---- If the server reads 'have' lines, it then will respond by ACKing any of the obj-ids the client said it had that the server also has. The server will ACK obj-ids differently depending on which ack mode is chosen by the client. In multi_ack mode: * the server will respond with 'ACK obj-id continue' for any common commits. * once the server has found an acceptable common base commit and is ready to make a packfile, it will blindly ACK all 'have' obj-ids back to the client. * the server will then send a 'NAK' and then wait for another response from the client - either a 'done' or another list of 'have' lines. In multi_ack_detailed mode: * the server will differentiate the ACKs where it is signaling that it is ready to send data with 'ACK obj-id ready' lines, and signals the identified common commits with 'ACK obj-id common' lines. Without either multi_ack or multi_ack_detailed: * upload-pack sends "ACK obj-id" on the first common object it finds. After that it says nothing until the client gives it a "done". * upload-pack sends "NAK" on a flush-pkt if no common object has been found yet. If one has been found, and thus an ACK was already sent, it's silent on the flush-pkt. 
After the client has gotten enough ACK responses that it can determine that the server has enough information to send an efficient packfile (in the canonical implementation, this is determined when it has received enough ACKs that it can color everything left in the --date-order queue as common with the server, or the --date-order queue is empty), or the client determines that it wants to give up (in the canonical implementation, this is determined when the client sends 256 'have' lines without getting any of them ACKed by the server - meaning there is nothing in common and the server should just send all of its objects), then the client will send a 'done' command. The 'done' command signals to the server that the client is ready to receive its packfile data. However, the 256 limit *only* turns on in the canonical client implementation if we have received at least one "ACK %s continue" during a prior round. This helps to ensure that at least one common ancestor is found before we give up entirely. Once the 'done' line is read from the client, the server will either send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object name of the last commit determined to be common. The server only sends ACK after 'done' if there is at least one common base and multi_ack or multi_ack_detailed is enabled. The server always sends NAK after 'done' if there is no common base found. Then the server will start sending its packfile data. 
---- server-response = *ack_multi ack / nak ack_multi = PKT-LINE("ACK" SP obj-id ack_status) ack_status = "continue" / "common" / "ready" ack = PKT-LINE("ACK" SP obj-id) nak = PKT-LINE("NAK") ---- A simple clone may look like this (with no 'have' lines): ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 C: 0009done\n S: 0008NAK\n S: [PACKFILE] ---- An incremental update (fetch) response might look like this: ---- C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ side-band-64k ofs-delta\n C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n C: 0000 C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n C: [30 more have lines] C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n C: 0000 S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n S: 0008NAK\n C: 0009done\n S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n S: [PACKFILE] ---- Packfile Data ------------- Now that the client and server have finished negotiation about what the minimal amount of data that needs to be sent to the client is, the server will construct and send the required data in packfile format. See pack-format.txt for what the packfile itself actually looks like. If 'side-band' or 'side-band-64k' capabilities have been specified by the client, the server will send the packfile data multiplexed. Each packet starting with the packet-line length of the amount of data that follows, followed by a single byte specifying the sideband the following data is coming in on. 
In 'side-band' mode, it will send up to 999 data bytes plus 1 control code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' mode it will send up to 65519 data bytes plus 1 control code, for a total of up to 65520 bytes in a pkt-line. The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain packfile data, sideband '2' will be used for progress information that the client will generally print to stderr and sideband '3' is used for error information. If no 'side-band' capability was specified, the server will stream the entire packfile without multiplexing. Pushing Data To a Server ------------------------ Pushing data to a server will invoke the 'receive-pack' process on the server, which will allow the client to tell it which references it should update and then send all the data the server will need for those new references to be complete. Once all the data is received and validated, the server will then update its references to what the client specified. Authentication -------------- The protocol itself contains no authentication mechanisms. That is to be handled by the transport, such as SSH, before the 'receive-pack' process is invoked. If 'receive-pack' is configured over the Git transport, those repositories will be writable by anyone who can access that port (9418) as that transport is unauthenticated. Reference Discovery ------------------- The reference discovery phase is done nearly the same way as it is in the fetching protocol. Each reference obj-id and name on the server is sent in packet-line format to the client, followed by a flush-pkt. The only real difference is that the capability listing is different - the only possible values are 'report-status', 'delete-refs', 'ofs-delta' and 'push-options'. Reference Update Request and Packfile Transfer ---------------------------------------------- Once the client knows what references the server is at, it can send a list of reference update requests. 
For each reference on the server that it wants to update, it sends a line listing the obj-id currently on the server, the obj-id the client would like to update it to and the name of the reference. This list is followed by a flush-pkt. Then the push options are transmitted one per packet followed by another flush-pkt. After that the packfile that should contain all the objects that the server will need to complete the new references will be sent. ---- update-request = *shallow ( command-list | push-cert ) [packfile] shallow = PKT-LINE("shallow" SP obj-id) command-list = PKT-LINE(command NUL capability-list) *PKT-LINE(command) flush-pkt command = create / delete / update create = zero-id SP new-id SP name delete = old-id SP zero-id SP name update = old-id SP new-id SP name old-id = obj-id new-id = obj-id push-cert = PKT-LINE("push-cert" NUL capability-list LF) PKT-LINE("certificate version 0.1" LF) PKT-LINE("pusher" SP ident LF) PKT-LINE("pushee" SP url LF) PKT-LINE("nonce" SP nonce LF) PKT-LINE(LF) *PKT-LINE(command LF) *PKT-LINE(gpg-signature-lines LF) PKT-LINE("push-cert-end" LF) packfile = "PACK" 28*(OCTET) ---- If the receiving end does not support delete-refs, the sending end MUST NOT ask for delete command. If the receiving end does not support push-cert, the sending end MUST NOT send a push-cert command. When a push-cert command is sent, command-list MUST NOT be sent; the commands recorded in the push certificate is used instead. The packfile MUST NOT be sent if the only command used is 'delete'. A packfile MUST be sent if either create or update command is used, even if the server already has all the necessary objects. In this case the client MUST send an empty packfile. The only time this is likely to happen is if the client is creating a new branch or a tag that points to an existing obj-id. 
The server will receive the packfile, unpack it, then validate each reference that is being updated that it hasn't changed while the request was being processed (the obj-id is still the same as the old-id), and it will run any update hooks to make sure that the update is acceptable. If all of that is fine, the server will then update the references. Push Certificate ---------------- A push certificate begins with a set of header lines. After the header and an empty line, the protocol commands follow, one per line. Note that the trailing LF in push-cert PKT-LINEs is _not_ optional; it must be present. Currently, the following header fields are defined: `pusher` ident:: Identify the GPG key in "Human Readable Name <email@address>" format. `pushee` url:: The repository URL (anonymized, if the URL contains authentication material) the user who ran `git push` intended to push into. `nonce` nonce:: The 'nonce' string the receiving repository asked the pushing user to include in the certificate, to prevent replay attacks. The GPG signature lines are a detached signature for the contents recorded in the push certificate before the signature block begins. The detached signature is used to certify that the commands were given by the pusher, who must be the signer. Report Status ------------- After receiving the pack data from the sender, the receiver sends a report if 'report-status' capability is in effect. It is a short listing of what happened in that update. It will first list the status of the packfile unpacking as either 'unpack ok' or 'unpack [error]'. Then it will list the status for each of the references that it tried to update. Each line is either 'ok [refname]' if the update was successful, or 'ng [refname] [error]' if the update was not. 
---- report-status = unpack-status 1*(command-status) flush-pkt unpack-status = PKT-LINE("unpack" SP unpack-result) unpack-result = "ok" / error-msg command-status = command-ok / command-fail command-ok = PKT-LINE("ok" SP refname) command-fail = PKT-LINE("ng" SP refname SP error-msg) error-msg = 1*(OCTET) ; where not "ok" ---- Updates can be unsuccessful for a number of reasons. The reference can have changed since the reference discovery phase was originally sent, meaning someone pushed in the meantime. The reference being pushed could be a non-fast-forward reference and the update hooks or configuration could be set to not allow that, etc. Also, some references can be updated while others can be rejected. An example client/server communication might look like this: ---- S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n S: 0000 C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n C: 0000 C: [PACKDATA] S: 000eunpack ok\n S: 0018ok refs/heads/debug\n S: 002ang refs/heads/master non-fast-forward\n ---- */
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/uppackreq.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/uppackreq.go
package packp

import (
	"bytes"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

// UploadPackRequest represents a upload-pack request.
// Zero-value is not safe, use NewUploadPackRequest instead.
type UploadPackRequest struct {
	UploadRequest
	UploadHaves
}

// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer.
func NewUploadPackRequest() *UploadPackRequest {
	ur := NewUploadRequest()
	return &UploadPackRequest{
		UploadHaves:   UploadHaves{},
		UploadRequest: *ur,
	}
}

// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
// returns a pointer. The request capabilities are filled with the most optimal
// ones, based on the adv value (advertised capabilities), the UploadPackRequest
// it has no wants, haves or shallows and an infinite depth
func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
	ur := NewUploadRequestFromCapabilities(adv)
	return &UploadPackRequest{
		UploadHaves:   UploadHaves{},
		UploadRequest: *ur,
	}
}

// IsEmpty returns whether a request is empty - it is empty if Haves are contained
// in the Wants, or if Wants length is zero, and we don't have any shallows
func (r *UploadPackRequest) IsEmpty() bool {
	return isSubset(r.Wants, r.Haves) && len(r.Shallows) == 0
}

// isSubset reports whether every hash in needle also appears in haystack.
// Runs in O(len(needle) * len(haystack)).
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {
	for _, h := range needle {
		found := false
		for _, oh := range haystack {
			if h == oh {
				found = true
				break
			}
		}

		if !found {
			return false
		}
	}

	return true
}

// UploadHaves is a message to signal the references that a client has in a
// upload-pack. Do not use this directly. Use UploadPackRequest request instead.
type UploadHaves struct {
	Haves []plumbing.Hash
}

// Encode encodes the UploadHaves into the Writer. If flush is true, a flush
// command will be encoded at the end of the writer content.
func (u *UploadHaves) Encode(w io.Writer, flush bool) error { e := pktline.NewEncoder(w) plumbing.HashesSort(u.Haves) var last plumbing.Hash for _, have := range u.Haves { if bytes.Equal(last[:], have[:]) { continue } if err := e.Encodef("have %s\n", have); err != nil { return fmt.Errorf("sending haves for %q: %s", have, err) } last = have } if flush && len(u.Haves) != 0 { if err := e.Flush(); err != nil { return fmt.Errorf("sending flush-pkt after haves: %s", err) } } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/shallowupd.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/shallowupd.go
package packp

import (
	"bytes"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

const (
	// Exact line lengths: the prefix plus a 40-character hex object id
	// (48 = 8 + 40 for "shallow", 50 = 10 + 40 for "unshallow").
	shallowLineLen   = 48
	unshallowLineLen = 50
)

// ShallowUpdate holds the server's response to a depth request: the
// commits that became shallow and the ones that are no longer shallow.
type ShallowUpdate struct {
	Shallows   []plumbing.Hash
	Unshallows []plumbing.Hash
}

// Decode reads pkt-lines from reader, collecting "shallow" and
// "unshallow" lines until a flush-pkt (or the end of the stream) is
// found. Lines with any other prefix are silently ignored.
func (r *ShallowUpdate) Decode(reader io.Reader) error {
	s := pktline.NewScanner(reader)

	for s.Scan() {
		line := s.Bytes()
		line = bytes.TrimSpace(line)

		var err error
		switch {
		case bytes.HasPrefix(line, shallow):
			err = r.decodeShallowLine(line)
		case bytes.HasPrefix(line, unshallow):
			err = r.decodeUnshallowLine(line)
		case bytes.Equal(line, pktline.Flush):
			return nil
		}

		if err != nil {
			return err
		}
	}

	return s.Err()
}

// decodeShallowLine parses one "shallow <hash>" line into r.Shallows.
func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
	hash, err := r.decodeLine(line, shallow, shallowLineLen)
	if err != nil {
		return err
	}

	r.Shallows = append(r.Shallows, hash)
	return nil
}

// decodeUnshallowLine parses one "unshallow <hash>" line into
// r.Unshallows.
func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
	hash, err := r.decodeLine(line, unshallow, unshallowLineLen)
	if err != nil {
		return err
	}

	r.Unshallows = append(r.Unshallows, hash)
	return nil
}

// decodeLine validates the exact line length and extracts the trailing
// 40-character hex object id.
func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
	if len(line) != expLen {
		return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
	}

	raw := string(line[expLen-40 : expLen])
	return plumbing.NewHash(raw), nil
}

// Encode writes all shallow and unshallow lines to w in pkt-line
// format, terminated by a flush-pkt.
func (r *ShallowUpdate) Encode(w io.Writer) error {
	e := pktline.NewEncoder(w)

	for _, h := range r.Shallows {
		if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil {
			return err
		}
	}

	for _, h := range r.Unshallows {
		if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil {
			return err
		}
	}

	return e.Flush()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
package packp

import (
	"bytes"
	"fmt"
	"io"
	"sort"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
)

// Encode writes the AdvRefs encoding to a writer.
//
// All the payloads will end with a newline character. Capabilities,
// references and shallows are written in alphabetical order, except for
// peeled references that always follow their corresponding references.
func (a *AdvRefs) Encode(w io.Writer) error {
	e := newAdvRefsEncoder(w)
	return e.Encode(a)
}

// advRefsEncoder encodes an AdvRefs message as a sequence of pkt-lines.
// The encoding is driven by a small state machine (see encoderStateFn);
// each state emits one section of the message and returns the next state.
type advRefsEncoder struct {
	data         *AdvRefs         // data to encode
	pe           *pktline.Encoder // where to write the encoded data
	firstRefName string           // reference name to encode in the first pkt-line (HEAD if present)
	firstRefHash plumbing.Hash    // hash referenced to encode in the first pkt-line (HEAD if present)
	sortedRefs   []string         // hash references to encode ordered by increasing order
	err          error            // sticky error
}

// newAdvRefsEncoder returns an encoder that writes pkt-lines to w.
func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
	return &advRefsEncoder{
		pe: pktline.NewEncoder(w),
	}
}

// Encode runs the state machine until a state returns nil, which happens
// either on completion (after the final flush) or when e.err is set.
func (e *advRefsEncoder) Encode(v *AdvRefs) error {
	e.data = v
	e.sortRefs()
	e.setFirstRef()

	for state := encodePrefix; state != nil; {
		state = state(e)
	}

	return e.err
}

// sortRefs captures the reference names in increasing lexical order so the
// output is deterministic.
func (e *advRefsEncoder) sortRefs() {
	if len(e.data.References) > 0 {
		refs := make([]string, 0, len(e.data.References))
		for refName := range e.data.References {
			refs = append(refs, refName)
		}

		sort.Strings(refs)
		e.sortedRefs = refs
	}
}

// setFirstRef picks the reference for the first pkt-line: HEAD when present,
// otherwise the lexically-smallest reference (if any).
func (e *advRefsEncoder) setFirstRef() {
	if e.data.Head != nil {
		e.firstRefName = head
		e.firstRefHash = *e.data.Head
		return
	}

	if len(e.sortedRefs) > 0 {
		refName := e.sortedRefs[0]
		e.firstRefName = refName
		e.firstRefHash = e.data.References[refName]
	}
}

// encoderStateFn is one step of the encoding state machine; it returns the
// next step, or nil to stop.
type encoderStateFn func(*advRefsEncoder) encoderStateFn

// encodePrefix emits the optional prefix payloads; an empty prefix entry is
// written as a flush-pkt.
func encodePrefix(e *advRefsEncoder) encoderStateFn {
	for _, p := range e.data.Prefix {
		if bytes.Equal(p, pktline.Flush) {
			if e.err = e.pe.Flush(); e.err != nil {
				return nil
			}
			continue
		}
		if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
			return nil
		}
	}

	return encodeFirstLine
}

// Adds the first pkt-line payload: head hash, head ref and capabilities.
// If HEAD ref is not found, the first reference ordered in increasing order will be used.
// If there aren't HEAD neither refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
func encodeFirstLine(e *advRefsEncoder) encoderStateFn {
	const formatFirstLine = "%s %s\x00%s\n"
	var firstLine string
	capabilities := formatCaps(e.data.Capabilities)

	if e.firstRefName == "" {
		// No HEAD and no refs: advertise capabilities against the zero-id.
		firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities)
	} else {
		firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities)
	}

	if e.err = e.pe.EncodeString(firstLine); e.err != nil {
		return nil
	}

	return encodeRefs
}

// formatCaps renders the capability list, tolerating a nil list.
func formatCaps(c *capability.List) string {
	if c == nil {
		return ""
	}

	return c.String()
}

// Adds the (sorted) refs: hash SP refname EOL
// and their peeled refs if any.
func encodeRefs(e *advRefsEncoder) encoderStateFn {
	for _, r := range e.sortedRefs {
		// The first ref was already emitted by encodeFirstLine.
		if r == e.firstRefName {
			continue
		}

		hash := e.data.References[r]
		if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
			return nil
		}

		// A peeled ref always immediately follows its reference.
		if hash, ok := e.data.Peeled[r]; ok {
			if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
				return nil
			}
		}
	}

	return encodeShallow
}

// Adds the (sorted) shallows: "shallow" SP hash EOL
func encodeShallow(e *advRefsEncoder) encoderStateFn {
	sorted := sortShallows(e.data.Shallows)
	for _, hash := range sorted {
		if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
			return nil
		}
	}

	return encodeFlush
}

// sortShallows renders the shallow hashes as strings in increasing order.
func sortShallows(c []plumbing.Hash) []string {
	ret := []string{}
	for _, h := range c {
		ret = append(ret, h.String())
	}
	sort.Strings(ret)

	return ret
}

// encodeFlush terminates the message with a flush-pkt and ends the state
// machine.
func encodeFlush(e *advRefsEncoder) encoderStateFn {
	e.err = e.pe.Flush()
	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/uppackresp.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/uppackresp.go
package packp

import (
	"bufio"
	"errors"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"
)

// ErrUploadPackResponseNotDecoded is returned if Read is called without
// decoding first
var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded")

// UploadPackResponse contains all the information responded by the
// upload-pack service; it implements io.ReadCloser, which allows reading
// the packfile directly from it.
type UploadPackResponse struct {
	ShallowUpdate
	ServerResponse

	r          io.ReadCloser
	isShallow  bool
	isMultiACK bool
}

// NewUploadPackResponse creates a new UploadPackResponse instance; the
// request being responded to is required to derive the decoding mode.
func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse {
	multiACK := req.Capabilities.Supports(capability.MultiACK) ||
		req.Capabilities.Supports(capability.MultiACKDetailed)

	return &UploadPackResponse{
		isShallow:  !req.Depth.IsZero(),
		isMultiACK: multiACK,
	}
}

// NewUploadPackResponseWithPackfile creates a new UploadPackResponse
// instance and sets its packfile reader.
func NewUploadPackResponseWithPackfile(req *UploadPackRequest, pf io.ReadCloser) *UploadPackResponse {
	resp := NewUploadPackResponse(req)
	resp.r = pf

	return resp
}

// Decode decodes all the responses sent by the upload-pack service into the
// struct and prepares it to read the packfile using the Read method.
func (r *UploadPackResponse) Decode(reader io.ReadCloser) error {
	buf := bufio.NewReader(reader)

	if r.isShallow {
		if err := r.ShallowUpdate.Decode(buf); err != nil {
			return err
		}
	}

	if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil {
		return err
	}

	// The buffered reader is now positioned at the packfile content; wrap it
	// together with the original reader so Close reaches the underlying one.
	r.r = ioutil.NewReadCloser(buf, reader)

	return nil
}

// Encode encodes an UploadPackResponse: shallow updates (if any), the
// server response, and finally the packfile content.
func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
	if r.isShallow {
		if err := r.ShallowUpdate.Encode(w); err != nil {
			return err
		}
	}

	if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
		return err
	}

	// Close the packfile reader once the copy finishes, capturing any close
	// error in the named return value.
	defer ioutil.CheckClose(r.r, &err)

	_, err = io.Copy(w, r.r)
	return err
}

// Read reads the packfile data. If the request was done with any Sideband
// capability the content read should be demultiplexed. If Decode wasn't
// called beforehand, ErrUploadPackResponseNotDecoded is returned.
func (r *UploadPackResponse) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, ErrUploadPackResponseNotDecoded
	}

	return r.r.Read(p)
}

// Close closes the underlying reader, if any.
func (r *UploadPackResponse) Close() error {
	if r.r == nil {
		return nil
	}

	return r.r.Close()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/common.go
package packp

import (
	"fmt"
)

// stateFn is the signature used by the parser/encoder state machines in
// this package: each state returns the next state, or nil to stop.
type stateFn func() stateFn

const (
	// common
	hashSize = 40 // length of a hex-encoded SHA-1 hash

	// advrefs
	head   = "HEAD"
	noHead = "capabilities^{}" // placeholder ref name used when no HEAD/refs exist
)

var (
	// common
	sp  = []byte(" ")
	eol = []byte("\n")

	// advertised-refs
	null       = []byte("\x00")
	peeled     = []byte("^{}")
	noHeadMark = []byte(" capabilities^{}\x00")

	// upload-request
	want            = []byte("want ")
	shallow         = []byte("shallow ")
	deepen          = []byte("deepen")
	deepenCommits   = []byte("deepen ")
	deepenSince     = []byte("deepen-since ")
	deepenReference = []byte("deepen-not ")

	// shallow-update
	unshallow = []byte("unshallow ")

	// server-response
	ack = []byte("ACK")
	nak = []byte("NAK")

	// updreq
	shallowNoSp = []byte("shallow")
)

// isFlush reports whether the given pkt-line payload is a flush-pkt
// (encoded as an empty payload).
func isFlush(payload []byte) bool {
	return len(payload) == 0
}

var (
	// ErrNilWriter is returned when a nil writer is passed to the encoder.
	ErrNilWriter = fmt.Errorf("nil writer")
)

// ErrUnexpectedData represents an unexpected data decoding a message
type ErrUnexpectedData struct {
	Msg  string
	Data []byte
}

// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and
// the message given
func NewErrUnexpectedData(msg string, data []byte) error {
	return &ErrUnexpectedData{Msg: msg, Data: data}
}

// Error renders the message, appending the offending data when present.
func (err *ErrUnexpectedData) Error() string {
	if len(err.Data) == 0 {
		return err.Msg
	}

	return fmt.Sprintf("%s (%s)", err.Msg, err.Data)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
package sideband

import (
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

// Muxer multiplexes the packfile along with the progress messages and the
// error information. The multiplexing is performed using pktline format.
type Muxer struct {
	max int
	e   *pktline.Encoder
}

// chLen is the size of the one-byte channel code prefixing each packet.
const chLen = 1

// NewMuxer returns a new Muxer for the given t that writes on w.
//
// If t is equal to `Sideband` the max pack size is set to MaxPackedSize; for
// any other value, max pack is set to MaxPackedSize64k, that is the maximum
// length of a line in pktline format.
func NewMuxer(t Type, w io.Writer) *Muxer {
	limit := MaxPackedSize64k
	if t == Sideband {
		limit = MaxPackedSize
	}

	return &Muxer{
		max: limit - chLen,
		e:   pktline.NewEncoder(w),
	}
}

// Write writes p in the PackData channel.
func (m *Muxer) Write(p []byte) (int, error) {
	return m.WriteChannel(PackData, p)
}

// WriteChannel writes p in the given channel, splitting it into packets no
// larger than the negotiated maximum. This method can be used with any
// channel, but it is recommended only for the ProgressMessage and
// ErrorMessage channels; use Write for the PackData channel.
func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
	var written int
	for written < len(p) {
		n, err := m.doWrite(t, p[written:])
		written += n

		if err != nil {
			return written, err
		}
	}

	return written, nil
}

// doWrite emits a single packet with at most m.max payload bytes of p and
// reports how many bytes were consumed.
func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
	n := len(p)
	if n > m.max {
		n = m.max
	}

	return n, m.e.Encode(ch.WithPayload(p[:n]))
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/doc.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/doc.go
// Package sideband implements a sideband multiplexer/demultiplexer.
package sideband

// If 'side-band' or 'side-band-64k' capabilities have been specified by
// the client, the server will send the packfile data multiplexed.
//
// Either mode indicates that the packfile data will be streamed broken
// up into packets of up to either 1000 bytes in the case of 'side_band',
// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
// of a leading 4-byte pkt-line length of how much data is in the packet,
// followed by a 1-byte stream code, followed by the actual data.
//
// The stream code can be one of:
//
//	1 - pack data
//	2 - progress messages
//	3 - fatal error message just before stream aborts
//
// The "side-band-64k" capability came about as a way for newer clients
// that can handle much larger packets to request packets that are
// actually crammed nearly full, while maintaining backward compatibility
// for the older clients.
//
// Further, with side-band and its up to 1000-byte messages, it's actually
// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
// same deal, you have up to 65519 bytes of data and 1 byte for the stream
// code.
//
// The client MUST send only maximum of one of "side-band" and "side-
// band-64k". Server MUST diagnose it as an error if client requests
// both.
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/demux.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/demux.go
package sideband

import (
	"errors"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing/format/pktline"
)

// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded
var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")

// Progress where the progress information is stored
type Progress interface {
	io.Writer
}

// Demuxer demultiplexes the progress reports and error info interleaved with the
// packfile itself.
//
// A sideband has three different channels the main one, called PackData, contains
// the packfile data; the ErrorMessage channel, that contains server errors; and
// the last one, ProgressMessage channel, containing information about the ongoing
// task happening in the server (optional, can be suppressed sending NoProgress
// or Quiet capabilities to the server)
//
// In order to demultiplex the data stream, method `Read` should be called to
// retrieve the PackData channel, the incoming data from the ProgressMessage is
// written at `Progress` (if any), if any message is retrieved from the
// ErrorMessage channel an error is returned and we can assume that the
// connection has been closed.
type Demuxer struct {
	t Type
	r io.Reader
	s *pktline.Scanner

	max     int
	pending []byte // leftover PackData bytes from a packet larger than the caller's buffer

	// Progress is where the progress messages are stored
	Progress Progress
}

// NewDemuxer returns a new Demuxer for the given t and read from r
func NewDemuxer(t Type, r io.Reader) *Demuxer {
	max := MaxPackedSize64k
	if t == Sideband {
		max = MaxPackedSize
	}

	return &Demuxer{
		t:   t,
		r:   r,
		max: max,
		s:   pktline.NewScanner(r),
	}
}

// Read reads up to len(p) bytes from the PackData channel into p, an error can
// be return if an error happens when reading or if a message is sent in the
// ErrorMessage channel.
//
// When a ProgressMessage is read, is not copy to b, instead of this is written
// to the Progress
func (d *Demuxer) Read(b []byte) (n int, err error) {
	var read, req int

	req = len(b)
	// Keep pulling PackData until b is full or an error/EOF occurs.
	for read < req {
		n, err := d.doRead(b[read:req])
		read += n

		if err != nil {
			return read, err
		}
	}

	return read, nil
}

// doRead copies the next chunk of PackData into b, stashing any surplus in
// d.pending for the following call.
func (d *Demuxer) doRead(b []byte) (int, error) {
	read, err := d.nextPackData()
	size := len(read)
	wanted := len(b)

	if size > wanted {
		// The packet is larger than the caller's buffer: keep the tail.
		d.pending = read[wanted:]
	}

	if wanted > size {
		wanted = size
	}

	size = copy(b, read[:wanted])
	return size, err
}

// nextPackData returns the next PackData payload, serving pending bytes
// first. Progress payloads are forwarded to d.Progress; an ErrorMessage
// payload or an unknown channel yields an error. A flush-pkt (empty
// content) is treated as EOF.
func (d *Demuxer) nextPackData() ([]byte, error) {
	content := d.getPending()
	if len(content) != 0 {
		return content, nil
	}

	if !d.s.Scan() {
		if err := d.s.Err(); err != nil {
			return nil, err
		}

		return nil, io.EOF
	}

	content = d.s.Bytes()

	size := len(content)
	if size == 0 {
		return nil, io.EOF
	} else if size > d.max {
		return nil, ErrMaxPackedExceeded
	}

	// The first byte of the payload is the channel code.
	switch Channel(content[0]) {
	case PackData:
		return content[1:], nil
	case ProgressMessage:
		if d.Progress != nil {
			_, err := d.Progress.Write(content[1:])
			return nil, err
		}
	case ErrorMessage:
		return nil, fmt.Errorf("unexpected error: %s", content[1:])
	default:
		return nil, fmt.Errorf("unknown channel %s", content)
	}

	return nil, nil
}

// getPending consumes and returns the buffered leftover bytes, if any.
func (d *Demuxer) getPending() (b []byte) {
	if len(d.pending) == 0 {
		return nil
	}

	content := d.pending
	d.pending = nil

	return content
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/common.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband/common.go
package sideband // Type sideband type "side-band" or "side-band-64k" type Type int8 const ( // Sideband legacy sideband type up to 1000-byte messages Sideband Type = iota // Sideband64k sideband type up to 65519-byte messages Sideband64k Type = iota // MaxPackedSize for Sideband type MaxPackedSize = 1000 // MaxPackedSize64k for Sideband64k type MaxPackedSize64k = 65520 ) // Channel sideband channel type Channel byte // WithPayload encode the payload as a message func (ch Channel) WithPayload(payload []byte) []byte { return append([]byte{byte(ch)}, payload...) } const ( // PackData packfile content PackData Channel = 1 // ProgressMessage progress messages ProgressMessage Channel = 2 // ErrorMessage fatal error message just before stream aborts ErrorMessage Channel = 3 )
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability/capability.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability/capability.go
// Package capability defines the server and client capabilities.
package capability

import (
	"fmt"
	"os"
)

// Capability describes a server or client capability.
type Capability string

// String returns the raw capability name.
func (n Capability) String() string {
	return string(n)
}

const (
	// MultiACK allows the server to return "ACK obj-id continue" as soon
	// as it finds a commit it can use as a common base between the client's
	// wants and the client's have set, so the client can stop walking down
	// that branch of its history early instead of sending have lines the
	// server already knows to be common.
	MultiACK Capability = "multi_ack"
	// MultiACKDetailed is an extension of multi_ack that permits client to
	// better understand the server's in-memory state.
	MultiACKDetailed Capability = "multi_ack_detailed"
	// NoDone should only be used with the smart HTTP protocol. Combined
	// with multi_ack_detailed, it lets the sender stream the pack right
	// after its first "ACK obj-id ready" message, removing the final
	// "done" round-trip and slightly reducing latency.
	NoDone Capability = "no-done"
	// ThinPack advertises/requests packs whose deltas may reference base
	// objects not contained within the pack (but known to exist at the
	// receiving end). A client MUST NOT request 'thin-pack' if it cannot
	// "thicken" such a pack into a self-contained one. Receive-pack is
	// assumed to handle thin packs by default, but can opt out by
	// advertising 'no-thin'.
	ThinPack Capability = "thin-pack"
	// Sideband means that server can send, and client understand,
	// multiplexed progress reports and error info interleaved with the
	// packfile itself, in packets of up to 1000 bytes (999 payload bytes
	// plus a 1-byte stream code). Mutually exclusive with Sideband64k; a
	// modern client always favors Sideband64k. The client MUST send at
	// most one of the two, and the server MUST diagnose requesting both
	// as an error.
	Sideband Capability = "side-band"
	// Sideband64k is like Sideband but with packets of up to 65520 bytes
	// (65519 payload bytes plus a 1-byte stream code).
	Sideband64k Capability = "side-band-64k"
	// OFSDelta server can send, and client understand PACKv2 with delta
	// referring to its base by position in pack rather than by an obj-id. That
	// is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile.
	OFSDelta Capability = "ofs-delta"
	// Agent notifies the peer of the software version, typically of the
	// form "package/version" (printable ASCII, no spaces). The client may
	// respond with its own agent=Y, but MUST NOT if the server did not
	// mention the capability. Purely informative: MUST NOT be used to
	// programmatically assume the presence or absence of features.
	Agent Capability = "agent"
	// Shallow capability adds "deepen", "shallow" and "unshallow" commands to
	// the fetch-pack/upload-pack protocol so clients can request shallow
	// clones.
	Shallow Capability = "shallow"
	// DeepenSince adds the "deepen-since" command so the client can request
	// shallow clones cut at a specific time instead of depth (equivalent to
	// "rev-list --max-age=<timestamp>" server-side). Cannot be used with
	// "deepen".
	DeepenSince Capability = "deepen-since"
	// DeepenNot adds the "deepen-not" command so the client can request
	// shallow clones cut at a specific revision instead of depth
	// (equivalent to "rev-list --not <rev>" server-side). Cannot be used
	// with "deepen", but can be combined with "deepen-since".
	DeepenNot Capability = "deepen-not"
	// DeepenRelative changes the semantics of "deepen": the "depth"
	// argument is measured from the current shallow boundary rather than
	// from the remote refs.
	DeepenRelative Capability = "deepen-relative"
	// NoProgress tells the server not to send sideband channel 2 (progress)
	// at all — e.g. the client was started with "git clone -q". The
	// sideband channel 3 is still used for error responses.
	NoProgress Capability = "no-progress"
	// IncludeTag asks the server to also pack annotated tags whose target
	// object is being packed, so a client fetching a branch gets all new
	// annotated tags in a single connection. Servers MUST pack such tags
	// when requested, but clients MUST be prepared for the capability to
	// have been ignored and SHOULD fetch missing tags separately in that
	// case.
	IncludeTag Capability = "include-tag"
	// ReportStatus asks receive-pack to report, after unpacking and
	// updating references, whether the packfile unpacked successfully and
	// whether each reference was updated, including error messages for any
	// failures.
	ReportStatus Capability = "report-status"
	// DeleteRefs If the server sends back this capability, it means that
	// it is capable of accepting a zero-id value as the target
	// value of a reference update. It is not sent back by the client, it
	// simply informs the client that it can be sent zero-id values
	// to delete references
	DeleteRefs Capability = "delete-refs"
	// Quiet indicates the receive-pack server can silence human-readable
	// progress output while processing the received pack; a send-pack
	// client responds with 'quiet' when local progress reporting is also
	// suppressed (e.g. `push -q`, or stderr not a tty).
	Quiet Capability = "quiet"
	// Atomic If the server sends this capability it is capable of accepting
	// atomic pushes. If the pushing client requests this capability, the server
	// will update the refs in one atomic transaction. Either all refs are
	// updated or none.
	Atomic Capability = "atomic"
	// PushOptions indicates the server accepts push options sent after the
	// update commands but before the packfile; the server passes them to
	// the pre- and post-receive hooks processing the push.
	PushOptions Capability = "push-options"
	// AllowTipSHA1InWant if the upload-pack server advertises this capability,
	// fetch-pack may send "want" lines with SHA-1s that exist at the server but
	// are not advertised by upload-pack.
	AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
	// AllowReachableSHA1InWant if the upload-pack server advertises this
	// capability, fetch-pack may send "want" lines with SHA-1s that exist at
	// the server but are not advertised by upload-pack.
	AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
	// PushCert the receive-pack server that advertises this capability is
	// willing to accept a signed push certificate, and asks the <nonce> to be
	// included in the push certificate. A send-pack client MUST NOT
	// send a push-cert packet unless the receive-pack server advertises
	// this capability.
	PushCert Capability = "push-cert"
	// SymRef symbolic reference support for better negotiation.
	SymRef Capability = "symref"
	// ObjectFormat takes a hash algorithm as an argument, indicates that the
	// server supports the given hash algorithms.
	ObjectFormat Capability = "object-format"
	// Filter if present, fetch-pack may send "filter" commands to request a
	// partial clone or partial fetch and request that the server omit various objects from the packfile
	Filter Capability = "filter"
)

// userAgent is the base agent string advertised by this library.
const userAgent = "go-git/5.x"

// DefaultAgent provides the user agent string, optionally extended via the
// GO_GIT_USER_AGENT_EXTRA environment variable.
func DefaultAgent() string {
	if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
		return fmt.Sprintf("%s %s", userAgent, envUserAgent)
	}
	return userAgent
}

// known is the set of capabilities recognized by this package.
var known = map[Capability]bool{
	MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
	Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
	Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
	NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
	Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
	AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
	ObjectFormat: true, Filter: true,
}

// requiresArgument lists capabilities that must carry at least one value.
var requiresArgument = map[Capability]bool{
	Agent:        true,
	PushCert:     true,
	SymRef:       true,
	ObjectFormat: true,
}

// multipleArgument lists capabilities that may carry more than one value.
var multipleArgument = map[Capability]bool{
	SymRef: true,
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability/list.go
vendor/github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability/list.go
package capability import ( "bytes" "errors" "fmt" "strings" ) var ( // ErrArgumentsRequired is returned if no arguments are giving with a // capability that requires arguments ErrArgumentsRequired = errors.New("arguments required") // ErrArguments is returned if arguments are given with a capabilities that // not supports arguments ErrArguments = errors.New("arguments not allowed") // ErrEmptyArgument is returned when an empty value is given ErrEmptyArgument = errors.New("empty argument") // ErrMultipleArguments multiple argument given to a capabilities that not // support it ErrMultipleArguments = errors.New("multiple arguments not allowed") ) // List represents a list of capabilities type List struct { m map[Capability]*entry sort []string } type entry struct { Name Capability Values []string } // NewList returns a new List of capabilities func NewList() *List { return &List{ m: make(map[Capability]*entry), } } // IsEmpty returns true if the List is empty func (l *List) IsEmpty() bool { return len(l.sort) == 0 } // Decode decodes list of capabilities from raw into the list func (l *List) Decode(raw []byte) error { // git 1.x receive pack used to send a leading space on its // git-receive-pack capabilities announcement. We just trim space to be // tolerant to space changes in different versions. 
raw = bytes.TrimSpace(raw) if len(raw) == 0 { return nil } for _, data := range bytes.Split(raw, []byte{' '}) { pair := bytes.SplitN(data, []byte{'='}, 2) c := Capability(pair[0]) if len(pair) == 1 { if err := l.Add(c); err != nil { return err } continue } if err := l.Add(c, string(pair[1])); err != nil { return err } } return nil } // Get returns the values for a capability func (l *List) Get(capability Capability) []string { if _, ok := l.m[capability]; !ok { return nil } return l.m[capability].Values } // Set sets a capability removing the previous values func (l *List) Set(capability Capability, values ...string) error { if _, ok := l.m[capability]; ok { l.m[capability].Values = l.m[capability].Values[:0] } return l.Add(capability, values...) } // Add adds a capability, values are optional func (l *List) Add(c Capability, values ...string) error { if err := l.validate(c, values); err != nil { return err } if !l.Supports(c) { l.m[c] = &entry{Name: c} l.sort = append(l.sort, c.String()) } if len(values) == 0 { return nil } if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { return ErrMultipleArguments } l.m[c].Values = append(l.m[c].Values, values...) 
return nil } func (l *List) validateNoEmptyArgs(values []string) error { for _, v := range values { if v == "" { return ErrEmptyArgument } } return nil } func (l *List) validate(c Capability, values []string) error { if !known[c] { return l.validateNoEmptyArgs(values) } if requiresArgument[c] && len(values) == 0 { return ErrArgumentsRequired } if !requiresArgument[c] && len(values) != 0 { return ErrArguments } if !multipleArgument[c] && len(values) > 1 { return ErrMultipleArguments } return l.validateNoEmptyArgs(values) } // Supports returns true if capability is present func (l *List) Supports(capability Capability) bool { _, ok := l.m[capability] return ok } // Delete deletes a capability from the List func (l *List) Delete(capability Capability) { if !l.Supports(capability) { return } delete(l.m, capability) for i, c := range l.sort { if c != string(capability) { continue } l.sort = append(l.sort[:i], l.sort[i+1:]...) return } } // All returns a slice with all defined capabilities. func (l *List) All() []Capability { var cs []Capability for _, key := range l.sort { cs = append(cs, Capability(key)) } return cs } // String generates the capabilities strings, the capabilities are sorted in // insertion order func (l *List) String() string { var o []string for _, key := range l.sort { cap := l.m[Capability(key)] if len(cap.Values) == 0 { o = append(o, key) continue } for _, value := range cap.Values { o = append(o, fmt.Sprintf("%s=%s", key, value)) } } return strings.Join(o, " ") }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/lazycore/pkg/boxlayout/boxlayout.go
vendor/github.com/jesseduffield/lazycore/pkg/boxlayout/boxlayout.go
package boxlayout import ( "github.com/jesseduffield/lazycore/pkg/utils" "github.com/samber/lo" ) type Dimensions struct { X0 int X1 int Y0 int Y1 int } type Direction int const ( ROW Direction = iota COLUMN ) // to give a high-level explanation of what's going on here. We layout our windows by arranging a bunch of boxes in the available space. // If a box has children, it needs to specify how it wants to arrange those children: ROW or COLUMN. // If a box represents a window, you can put the window name in the Window field. // When determining how to divvy-up the available height (for row children) or width (for column children), we first // give the boxes with a static `size` the space that they want. Then we apportion // the remaining space based on the weights of the dynamic boxes (you can't define // both size and weight at the same time: you gotta pick one). If there are two // boxes, one with weight 1 and the other with weight 2, the first one gets 33% // of the available space and the second one gets the remaining 66% type Box struct { // Direction decides how the children boxes are laid out. ROW means the children will each form a row i.e. that they will be stacked on top of eachother. Direction Direction // function which takes the width and height assigned to the box and decides which orientation it will have ConditionalDirection func(width int, height int) Direction Children []*Box // function which takes the width and height assigned to the box and decides the layout of the children. ConditionalChildren func(width int, height int) []*Box // Window refers to the name of the window this box represents, if there is one Window string // static Size. If parent box's direction is ROW this refers to height, otherwise width Size int // dynamic size. 
Once all statically sized children have been considered, Weight decides how much of the remaining space will be taken up by the box // TODO: consider making there be one int and a type enum so we can't have size and Weight simultaneously defined Weight int } func ArrangeWindows(root *Box, x0, y0, width, height int) map[string]Dimensions { children := root.getChildren(width, height) if len(children) == 0 { // leaf node if root.Window != "" { dimensionsForWindow := Dimensions{X0: x0, Y0: y0, X1: x0 + width - 1, Y1: y0 + height - 1} return map[string]Dimensions{root.Window: dimensionsForWindow} } return map[string]Dimensions{} } direction := root.getDirection(width, height) var availableSize int if direction == COLUMN { availableSize = width } else { availableSize = height } sizes := calcSizes(children, availableSize) result := map[string]Dimensions{} offset := 0 for i, child := range children { boxSize := sizes[i] var resultForChild map[string]Dimensions if direction == COLUMN { resultForChild = ArrangeWindows(child, x0+offset, y0, boxSize, height) } else { resultForChild = ArrangeWindows(child, x0, y0+offset, width, boxSize) } result = mergeDimensionMaps(result, resultForChild) offset += boxSize } return result } func calcSizes(boxes []*Box, availableSpace int) []int { normalizedWeights := normalizeWeights(lo.Map(boxes, func(box *Box, _ int) int { return box.Weight })) totalWeight := 0 reservedSpace := 0 for i, box := range boxes { if box.isStatic() { reservedSpace += box.Size } else { totalWeight += normalizedWeights[i] } } dynamicSpace := utils.Max(0, availableSpace-reservedSpace) unitSize := 0 extraSpace := 0 if totalWeight > 0 { unitSize = dynamicSpace / totalWeight extraSpace = dynamicSpace % totalWeight } result := make([]int, len(boxes)) for i, box := range boxes { if box.isStatic() { // assuming that only one static child can have a size greater than the // available space. 
In that case we just crop the size to what's available result[i] = utils.Min(availableSpace, box.Size) } else { result[i] = unitSize * normalizedWeights[i] } } // distribute the remainder across dynamic boxes. for extraSpace > 0 { for i, weight := range normalizedWeights { if weight > 0 { result[i]++ extraSpace-- normalizedWeights[i]-- if extraSpace == 0 { break } } } } return result } // removes common multiple from weights e.g. if we get 2, 4, 4 we return 1, 2, 2. func normalizeWeights(weights []int) []int { if len(weights) == 0 { return []int{} } // to spare us some computation we'll exit early if any of our weights is 1 if lo.SomeBy(weights, func(weight int) bool { return weight == 1 }) { return weights } // map weights to factorSlices and find the lowest common factor positiveWeights := lo.Filter(weights, func(weight int, _ int) bool { return weight > 0 }) factorSlices := lo.Map(positiveWeights, func(weight int, _ int) []int { return calcFactors(weight) }) commonFactors := factorSlices[0] for _, factors := range factorSlices { commonFactors = lo.Intersect(commonFactors, factors) } if len(commonFactors) == 0 { return weights } newWeights := lo.Map(weights, func(weight int, _ int) int { return weight / commonFactors[0] }) return normalizeWeights(newWeights) } func calcFactors(n int) []int { factors := []int{} for i := 2; i <= n; i++ { if n%i == 0 { factors = append(factors, i) } } return factors } func (b *Box) isStatic() bool { return b.Size > 0 } func (b *Box) getDirection(width int, height int) Direction { if b.ConditionalDirection != nil { return b.ConditionalDirection(width, height) } return b.Direction } func (b *Box) getChildren(width int, height int) []*Box { if b.ConditionalChildren != nil { return b.ConditionalChildren(width, height) } return b.Children } func mergeDimensionMaps(a map[string]Dimensions, b map[string]Dimensions) map[string]Dimensions { result := map[string]Dimensions{} for _, dimensionMap := range []map[string]Dimensions{a, b} { for k, 
v := range dimensionMap { result[k] = v } } return result }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/lazycore/pkg/utils/utils.go
vendor/github.com/jesseduffield/lazycore/pkg/utils/utils.go
package utils import ( "log" "os" "path/filepath" ) // Min returns the minimum of two integers func Min(x, y int) int { if x < y { return x } return y } // Max returns the maximum of two integers func Max(x, y int) int { if x > y { return x } return y } // Clamp returns a value x restricted between min and max func Clamp(x int, min int, max int) int { if x < min { return min } else if x > max { return max } return x } // GetLazyRootDirectory finds a lazy project root directory. // // It's used for cheatsheet scripts and integration tests. Not to be confused with finding the // root directory of _any_ random repo. func GetLazyRootDirectory() string { path, err := os.Getwd() if err != nil { panic(err) } for { _, err := os.Stat(filepath.Join(path, ".git")) if err == nil { return path } if !os.IsNotExist(err) { panic(err) } path = filepath.Dir(path) if path == "/" { log.Fatal("must run in lazy project folder or child folder") } } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/bahlo/generic-list-go/list.go
vendor/github.com/bahlo/generic-list-go/list.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package list implements a doubly linked list. // // To iterate over a list (where l is a *List): // for e := l.Front(); e != nil; e = e.Next() { // // do something with e.Value // } // package list // Element is an element of a linked list. type Element[T any] struct { // Next and previous pointers in the doubly-linked list of elements. // To simplify the implementation, internally a list l is implemented // as a ring, such that &l.root is both the next element of the last // list element (l.Back()) and the previous element of the first list // element (l.Front()). next, prev *Element[T] // The list to which this element belongs. list *List[T] // The value stored with this element. Value T } // Next returns the next list element or nil. func (e *Element[T]) Next() *Element[T] { if p := e.next; e.list != nil && p != &e.list.root { return p } return nil } // Prev returns the previous list element or nil. func (e *Element[T]) Prev() *Element[T] { if p := e.prev; e.list != nil && p != &e.list.root { return p } return nil } // List represents a doubly linked list. // The zero value for List is an empty list ready to use. type List[T any] struct { root Element[T] // sentinel list element, only &root, root.prev, and root.next are used len int // current list length excluding (this) sentinel element } // Init initializes or clears list l. func (l *List[T]) Init() *List[T] { l.root.next = &l.root l.root.prev = &l.root l.len = 0 return l } // New returns an initialized list. func New[T any]() *List[T] { return new(List[T]).Init() } // Len returns the number of elements of list l. // The complexity is O(1). func (l *List[T]) Len() int { return l.len } // Front returns the first element of list l or nil if the list is empty. 
func (l *List[T]) Front() *Element[T] { if l.len == 0 { return nil } return l.root.next } // Back returns the last element of list l or nil if the list is empty. func (l *List[T]) Back() *Element[T] { if l.len == 0 { return nil } return l.root.prev } // lazyInit lazily initializes a zero List value. func (l *List[T]) lazyInit() { if l.root.next == nil { l.Init() } } // insert inserts e after at, increments l.len, and returns e. func (l *List[T]) insert(e, at *Element[T]) *Element[T] { e.prev = at e.next = at.next e.prev.next = e e.next.prev = e e.list = l l.len++ return e } // insertValue is a convenience wrapper for insert(&Element{Value: v}, at). func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] { return l.insert(&Element[T]{Value: v}, at) } // remove removes e from its list, decrements l.len func (l *List[T]) remove(e *Element[T]) { e.prev.next = e.next e.next.prev = e.prev e.next = nil // avoid memory leaks e.prev = nil // avoid memory leaks e.list = nil l.len-- } // move moves e to next to at. func (l *List[T]) move(e, at *Element[T]) { if e == at { return } e.prev.next = e.next e.next.prev = e.prev e.prev = at e.next = at.next e.prev.next = e e.next.prev = e } // Remove removes e from l if e is an element of list l. // It returns the element value e.Value. // The element must not be nil. func (l *List[T]) Remove(e *Element[T]) T { if e.list == l { // if e.list == l, l must have been initialized when e was inserted // in l or l == nil (e is a zero Element) and l.remove will crash l.remove(e) } return e.Value } // PushFront inserts a new element e with value v at the front of list l and returns e. func (l *List[T]) PushFront(v T) *Element[T] { l.lazyInit() return l.insertValue(v, &l.root) } // PushBack inserts a new element e with value v at the back of list l and returns e. 
func (l *List[T]) PushBack(v T) *Element[T] { l.lazyInit() return l.insertValue(v, l.root.prev) } // InsertBefore inserts a new element e with value v immediately before mark and returns e. // If mark is not an element of l, the list is not modified. // The mark must not be nil. func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] { if mark.list != l { return nil } // see comment in List.Remove about initialization of l return l.insertValue(v, mark.prev) } // InsertAfter inserts a new element e with value v immediately after mark and returns e. // If mark is not an element of l, the list is not modified. // The mark must not be nil. func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] { if mark.list != l { return nil } // see comment in List.Remove about initialization of l return l.insertValue(v, mark) } // MoveToFront moves element e to the front of list l. // If e is not an element of l, the list is not modified. // The element must not be nil. func (l *List[T]) MoveToFront(e *Element[T]) { if e.list != l || l.root.next == e { return } // see comment in List.Remove about initialization of l l.move(e, &l.root) } // MoveToBack moves element e to the back of list l. // If e is not an element of l, the list is not modified. // The element must not be nil. func (l *List[T]) MoveToBack(e *Element[T]) { if e.list != l || l.root.prev == e { return } // see comment in List.Remove about initialization of l l.move(e, l.root.prev) } // MoveBefore moves element e to its new position before mark. // If e or mark is not an element of l, or e == mark, the list is not modified. // The element and mark must not be nil. func (l *List[T]) MoveBefore(e, mark *Element[T]) { if e.list != l || e == mark || mark.list != l { return } l.move(e, mark.prev) } // MoveAfter moves element e to its new position after mark. // If e or mark is not an element of l, or e == mark, the list is not modified. // The element and mark must not be nil. 
func (l *List[T]) MoveAfter(e, mark *Element[T]) { if e.list != l || e == mark || mark.list != l { return } l.move(e, mark) } // PushBackList inserts a copy of another list at the back of list l. // The lists l and other may be the same. They must not be nil. func (l *List[T]) PushBackList(other *List[T]) { l.lazyInit() for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { l.insertValue(e.Value, l.root.prev) } } // PushFrontList inserts a copy of another list at the front of list l. // The lists l and other may be the same. They must not be nil. func (l *List[T]) PushFrontList(other *List[T]) { l.lazyInit() for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { l.insertValue(e.Value, &l.root) } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/fuzz.go
vendor/github.com/buger/jsonparser/fuzz.go
package jsonparser func FuzzParseString(data []byte) int { r, err := ParseString(data) if err != nil || r == "" { return 0 } return 1 } func FuzzEachKey(data []byte) int { paths := [][]string{ {"name"}, {"order"}, {"nested", "a"}, {"nested", "b"}, {"nested2", "a"}, {"nested", "nested3", "b"}, {"arr", "[1]", "b"}, {"arrInt", "[3]"}, {"arrInt", "[5]"}, {"nested"}, {"arr", "["}, {"a\n", "b\n"}, } EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...) return 1 } func FuzzDelete(data []byte) int { Delete(data, "test") return 1 } func FuzzSet(data []byte) int { _, err := Set(data, []byte(`"new value"`), "test") if err != nil { return 0 } return 1 } func FuzzObjectEach(data []byte) int { _ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error { return nil }) return 1 } func FuzzParseFloat(data []byte) int { _, err := ParseFloat(data) if err != nil { return 0 } return 1 } func FuzzParseInt(data []byte) int { _, err := ParseInt(data) if err != nil { return 0 } return 1 } func FuzzParseBool(data []byte) int { _, err := ParseBoolean(data) if err != nil { return 0 } return 1 } func FuzzTokenStart(data []byte) int { _ = tokenStart(data) return 1 } func FuzzGetString(data []byte) int { _, err := GetString(data, "test") if err != nil { return 0 } return 1 } func FuzzGetFloat(data []byte) int { _, err := GetFloat(data, "test") if err != nil { return 0 } return 1 } func FuzzGetInt(data []byte) int { _, err := GetInt(data, "test") if err != nil { return 0 } return 1 } func FuzzGetBoolean(data []byte) int { _, err := GetBoolean(data, "test") if err != nil { return 0 } return 1 } func FuzzGetUnsafeString(data []byte) int { _, err := GetUnsafeString(data, "test") if err != nil { return 0 } return 1 }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/parser.go
vendor/github.com/buger/jsonparser/parser.go
package jsonparser import ( "bytes" "errors" "fmt" "strconv" ) // Errors var ( KeyPathNotFoundError = errors.New("Key path not found") UnknownValueTypeError = errors.New("Unknown value type") MalformedJsonError = errors.New("Malformed JSON error") MalformedStringError = errors.New("Value is string, but can't find closing '\"' symbol") MalformedArrayError = errors.New("Value is array, but can't find closing ']' symbol") MalformedObjectError = errors.New("Value looks like object, but can't find closing '}' symbol") MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol") OverflowIntegerError = errors.New("Value is number, but overflowed while parsing") MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string") ) // How much stack space to allocate for unescaping JSON strings; if a string longer // than this needs to be escaped, it will result in a heap allocation const unescapeStackBufSize = 64 func tokenEnd(data []byte) int { for i, c := range data { switch c { case ' ', '\n', '\r', '\t', ',', '}', ']': return i } } return len(data) } func findTokenStart(data []byte, token byte) int { for i := len(data) - 1; i >= 0; i-- { switch data[i] { case token: return i case '[', '{': return 0 } } return 0 } func findKeyStart(data []byte, key string) (int, error) { i := 0 ln := len(data) if ln > 0 && (data[0] == '{' || data[0] == '[') { i = 1 } var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings if ku, err := Unescape(StringToBytes(key), stackbuf[:]); err == nil { key = bytesToString(&ku) } for i < ln { switch data[i] { case '"': i++ keyBegin := i strEnd, keyEscaped := stringEnd(data[i:]) if strEnd == -1 { break } i += strEnd keyEnd := i - 1 valueOffset := nextToken(data[i:]) if valueOffset == -1 { break } i += valueOffset // if string is a key, and key level match k := data[keyBegin:keyEnd] // for unescape: if there are no 
escape sequences, this is cheap; if there are, it is a // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize if keyEscaped { if ku, err := Unescape(k, stackbuf[:]); err != nil { break } else { k = ku } } if data[i] == ':' && len(key) == len(k) && bytesToString(&k) == key { return keyBegin - 1, nil } case '[': end := blockEnd(data[i:], data[i], ']') if end != -1 { i = i + end } case '{': end := blockEnd(data[i:], data[i], '}') if end != -1 { i = i + end } } i++ } return -1, KeyPathNotFoundError } func tokenStart(data []byte) int { for i := len(data) - 1; i >= 0; i-- { switch data[i] { case '\n', '\r', '\t', ',', '{', '[': return i } } return 0 } // Find position of next character which is not whitespace func nextToken(data []byte) int { for i, c := range data { switch c { case ' ', '\n', '\r', '\t': continue default: return i } } return -1 } // Find position of last character which is not whitespace func lastToken(data []byte) int { for i := len(data) - 1; i >= 0; i-- { switch data[i] { case ' ', '\n', '\r', '\t': continue default: return i } } return -1 } // Tries to find the end of string // Support if string contains escaped quote symbols. func stringEnd(data []byte) (int, bool) { escaped := false for i, c := range data { if c == '"' { if !escaped { return i + 1, false } else { j := i - 1 for { if j < 0 || data[j] != '\\' { return i + 1, true // even number of backslashes } j-- if j < 0 || data[j] != '\\' { break // odd number of backslashes } j-- } } } else if c == '\\' { escaped = true } } return -1, escaped } // Find end of the data structure, array or object. 
// For array openSym and closeSym will be '[' and ']', for object '{' and '}' func blockEnd(data []byte, openSym byte, closeSym byte) int { level := 0 i := 0 ln := len(data) for i < ln { switch data[i] { case '"': // If inside string, skip it se, _ := stringEnd(data[i+1:]) if se == -1 { return -1 } i += se case openSym: // If open symbol, increase level level++ case closeSym: // If close symbol, increase level level-- // If we have returned to the original level, we're done if level == 0 { return i + 1 } } i++ } return -1 } func searchKeys(data []byte, keys ...string) int { keyLevel := 0 level := 0 i := 0 ln := len(data) lk := len(keys) lastMatched := true if lk == 0 { return 0 } var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings for i < ln { switch data[i] { case '"': i++ keyBegin := i strEnd, keyEscaped := stringEnd(data[i:]) if strEnd == -1 { return -1 } i += strEnd keyEnd := i - 1 valueOffset := nextToken(data[i:]) if valueOffset == -1 { return -1 } i += valueOffset // if string is a key if data[i] == ':' { if level < 1 { return -1 } key := data[keyBegin:keyEnd] // for unescape: if there are no escape sequences, this is cheap; if there are, it is a // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize var keyUnesc []byte if !keyEscaped { keyUnesc = key } else if ku, err := Unescape(key, stackbuf[:]); err != nil { return -1 } else { keyUnesc = ku } if level <= len(keys) { if equalStr(&keyUnesc, keys[level-1]) { lastMatched = true // if key level match if keyLevel == level-1 { keyLevel++ // If we found all keys in path if keyLevel == lk { return i + 1 } } } else { lastMatched = false } } else { return -1 } } else { i-- } case '{': // in case parent key is matched then only we will increase the level otherwise can directly // can move to the end of this block if !lastMatched { end := blockEnd(data[i:], '{', '}') if end == -1 { return -1 } i += end - 1 } else { level++ } 
case '}': level-- if level == keyLevel { keyLevel-- } case '[': // If we want to get array element by index if keyLevel == level && keys[level][0] == '[' { var keyLen = len(keys[level]) if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' { return -1 } aIdx, err := strconv.Atoi(keys[level][1 : keyLen-1]) if err != nil { return -1 } var curIdx int var valueFound []byte var valueOffset int var curI = i ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { if curIdx == aIdx { valueFound = value valueOffset = offset if dataType == String { valueOffset = valueOffset - 2 valueFound = data[curI+valueOffset : curI+valueOffset+len(value)+2] } } curIdx += 1 }) if valueFound == nil { return -1 } else { subIndex := searchKeys(valueFound, keys[level+1:]...) if subIndex < 0 { return -1 } return i + valueOffset + subIndex } } else { // Do not search for keys inside arrays if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { return -1 } else { i += arraySkip - 1 } } case ':': // If encountered, JSON data is malformed return -1 } i++ } return -1 } func sameTree(p1, p2 []string) bool { minLen := len(p1) if len(p2) < minLen { minLen = len(p2) } for pi_1, p_1 := range p1[:minLen] { if p2[pi_1] != p_1 { return false } } return true } func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int { var x struct{} pathFlags := make([]bool, len(paths)) var level, pathsMatched, i int ln := len(data) var maxPath int for _, p := range paths { if len(p) > maxPath { maxPath = len(p) } } pathsBuf := make([]string, maxPath) for i < ln { switch data[i] { case '"': i++ keyBegin := i strEnd, keyEscaped := stringEnd(data[i:]) if strEnd == -1 { return -1 } i += strEnd keyEnd := i - 1 valueOffset := nextToken(data[i:]) if valueOffset == -1 { return -1 } i += valueOffset // if string is a key, and key level match if data[i] == ':' { match := -1 key := data[keyBegin:keyEnd] // for unescape: if there are no escape 
sequences, this is cheap; if there are, it is a // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize var keyUnesc []byte if !keyEscaped { keyUnesc = key } else { var stackbuf [unescapeStackBufSize]byte if ku, err := Unescape(key, stackbuf[:]); err != nil { return -1 } else { keyUnesc = ku } } if maxPath >= level { if level < 1 { cb(-1, nil, Unknown, MalformedJsonError) return -1 } pathsBuf[level-1] = bytesToString(&keyUnesc) for pi, p := range paths { if len(p) != level || pathFlags[pi] || !equalStr(&keyUnesc, p[level-1]) || !sameTree(p, pathsBuf[:level]) { continue } match = pi pathsMatched++ pathFlags[pi] = true v, dt, _, e := Get(data[i+1:]) cb(pi, v, dt, e) if pathsMatched == len(paths) { break } } if pathsMatched == len(paths) { return i } } if match == -1 { tokenOffset := nextToken(data[i+1:]) i += tokenOffset if data[i] == '{' { blockSkip := blockEnd(data[i:], '{', '}') i += blockSkip + 1 } } if i < ln { switch data[i] { case '{', '}', '[', '"': i-- } } } else { i-- } case '{': level++ case '}': level-- case '[': var ok bool arrIdxFlags := make(map[int]struct{}) pIdxFlags := make([]bool, len(paths)) if level < 0 { cb(-1, nil, Unknown, MalformedJsonError) return -1 } for pi, p := range paths { if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { continue } if len(p[level]) >= 2 { aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) arrIdxFlags[aIdx] = x pIdxFlags[pi] = true } } if len(arrIdxFlags) > 0 { level++ var curIdx int arrOff, _ := ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { if _, ok = arrIdxFlags[curIdx]; ok { for pi, p := range paths { if pIdxFlags[pi] { aIdx, _ := strconv.Atoi(p[level-1][1 : len(p[level-1])-1]) if curIdx == aIdx { of := searchKeys(value, p[level:]...) 
pathsMatched++ pathFlags[pi] = true if of != -1 { v, dt, _, e := Get(value[of:]) cb(pi, v, dt, e) } } } } } curIdx += 1 }) if pathsMatched == len(paths) { return i } i += arrOff - 1 } else { // Do not search for keys inside arrays if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { return -1 } else { i += arraySkip - 1 } } case ']': level-- } i++ } return -1 } // Data types available in valid JSON data. type ValueType int const ( NotExist = ValueType(iota) String Number Object Array Boolean Null Unknown ) func (vt ValueType) String() string { switch vt { case NotExist: return "non-existent" case String: return "string" case Number: return "number" case Object: return "object" case Array: return "array" case Boolean: return "boolean" case Null: return "null" default: return "unknown" } } var ( trueLiteral = []byte("true") falseLiteral = []byte("false") nullLiteral = []byte("null") ) func createInsertComponent(keys []string, setValue []byte, comma, object bool) []byte { isIndex := string(keys[0][0]) == "[" offset := 0 lk := calcAllocateSpace(keys, setValue, comma, object) buffer := make([]byte, lk, lk) if comma { offset += WriteToBuffer(buffer[offset:], ",") } if isIndex && !comma { offset += WriteToBuffer(buffer[offset:], "[") } else { if object { offset += WriteToBuffer(buffer[offset:], "{") } if !isIndex { offset += WriteToBuffer(buffer[offset:], "\"") offset += WriteToBuffer(buffer[offset:], keys[0]) offset += WriteToBuffer(buffer[offset:], "\":") } } for i := 1; i < len(keys); i++ { if string(keys[i][0]) == "[" { offset += WriteToBuffer(buffer[offset:], "[") } else { offset += WriteToBuffer(buffer[offset:], "{\"") offset += WriteToBuffer(buffer[offset:], keys[i]) offset += WriteToBuffer(buffer[offset:], "\":") } } offset += WriteToBuffer(buffer[offset:], string(setValue)) for i := len(keys) - 1; i > 0; i-- { if string(keys[i][0]) == "[" { offset += WriteToBuffer(buffer[offset:], "]") } else { offset += WriteToBuffer(buffer[offset:], "}") } } if 
isIndex && !comma { offset += WriteToBuffer(buffer[offset:], "]") } if object && !isIndex { offset += WriteToBuffer(buffer[offset:], "}") } return buffer } func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int { isIndex := string(keys[0][0]) == "[" lk := 0 if comma { // , lk += 1 } if isIndex && !comma { // [] lk += 2 } else { if object { // { lk += 1 } if !isIndex { // "keys[0]" lk += len(keys[0]) + 3 } } lk += len(setValue) for i := 1; i < len(keys); i++ { if string(keys[i][0]) == "[" { // [] lk += 2 } else { // {"keys[i]":setValue} lk += len(keys[i]) + 5 } } if object && !isIndex { // } lk += 1 } return lk } func WriteToBuffer(buffer []byte, str string) int { copy(buffer, str) return len(str) } /* Del - Receives existing data structure, path to delete. Returns: `data` - return modified data */ func Delete(data []byte, keys ...string) []byte { lk := len(keys) if lk == 0 { return data[:0] } array := false if len(keys[lk-1]) > 0 && string(keys[lk-1][0]) == "[" { array = true } var startOffset, keyOffset int endOffset := len(data) var err error if !array { if len(keys) > 1 { _, _, startOffset, endOffset, err = internalGet(data, keys[:lk-1]...) 
if err == KeyPathNotFoundError { // problem parsing the data return data } } keyOffset, err = findKeyStart(data[startOffset:endOffset], keys[lk-1]) if err == KeyPathNotFoundError { // problem parsing the data return data } keyOffset += startOffset _, _, _, subEndOffset, _ := internalGet(data[startOffset:endOffset], keys[lk-1]) endOffset = startOffset + subEndOffset tokEnd := tokenEnd(data[endOffset:]) tokStart := findTokenStart(data[:keyOffset], ","[0]) if data[endOffset+tokEnd] == ","[0] { endOffset += tokEnd + 1 } else if data[endOffset+tokEnd] == " "[0] && len(data) > endOffset+tokEnd+1 && data[endOffset+tokEnd+1] == ","[0] { endOffset += tokEnd + 2 } else if data[endOffset+tokEnd] == "}"[0] && data[tokStart] == ","[0] { keyOffset = tokStart } } else { _, _, keyOffset, endOffset, err = internalGet(data, keys...) if err == KeyPathNotFoundError { // problem parsing the data return data } tokEnd := tokenEnd(data[endOffset:]) tokStart := findTokenStart(data[:keyOffset], ","[0]) if data[endOffset+tokEnd] == ","[0] { endOffset += tokEnd + 1 } else if data[endOffset+tokEnd] == "]"[0] && data[tokStart] == ","[0] { keyOffset = tokStart } } // We need to remove remaining trailing comma if we delete las element in the object prevTok := lastToken(data[:keyOffset]) remainedValue := data[endOffset:] var newOffset int if nextToken(remainedValue) > -1 && remainedValue[nextToken(remainedValue)] == '}' && data[prevTok] == ',' { newOffset = prevTok } else { newOffset = prevTok + 1 } // We have to make a copy here if we don't want to mangle the original data, because byte slices are // accessed by reference and not by value dataCopy := make([]byte, len(data)) copy(dataCopy, data) data = append(dataCopy[:newOffset], dataCopy[endOffset:]...) return data } /* Set - Receives existing data structure, path to set, and data to set at that key. 
Returns: `value` - modified byte array `err` - On any parsing error */ func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) { // ensure keys are set if len(keys) == 0 { return nil, KeyPathNotFoundError } _, _, startOffset, endOffset, err := internalGet(data, keys...) if err != nil { if err != KeyPathNotFoundError { // problem parsing the data return nil, err } // full path doesnt exist // does any subpath exist? var depth int for i := range keys { _, _, start, end, sErr := internalGet(data, keys[:i+1]...) if sErr != nil { break } else { endOffset = end startOffset = start depth++ } } comma := true object := false if endOffset == -1 { firstToken := nextToken(data) // We can't set a top-level key if data isn't an object if firstToken < 0 || data[firstToken] != '{' { return nil, KeyPathNotFoundError } // Don't need a comma if the input is an empty object secondToken := firstToken + 1 + nextToken(data[firstToken+1:]) if data[secondToken] == '}' { comma = false } // Set the top level key at the end (accounting for any trailing whitespace) // This assumes last token is valid like '}', could check and return error endOffset = lastToken(data) } depthOffset := endOffset if depth != 0 { // if subpath is a non-empty object, add to it // or if subpath is a non-empty array, add to it if (data[startOffset] == '{' && data[startOffset+1+nextToken(data[startOffset+1:])] != '}') || (data[startOffset] == '[' && data[startOffset+1+nextToken(data[startOffset+1:])] == '{') && keys[depth:][0][0] == 91 { depthOffset-- startOffset = depthOffset // otherwise, over-write it with a new object } else { comma = false object = true } } else { startOffset = depthOffset } value = append(data[:startOffset], append(createInsertComponent(keys[depth:], setValue, comma, object), data[depthOffset:]...)...) 
} else { // path currently exists startComponent := data[:startOffset] endComponent := data[endOffset:] value = make([]byte, len(startComponent)+len(endComponent)+len(setValue)) newEndOffset := startOffset + len(setValue) copy(value[0:startOffset], startComponent) copy(value[startOffset:newEndOffset], setValue) copy(value[newEndOffset:], endComponent) } return value, nil } func getType(data []byte, offset int) ([]byte, ValueType, int, error) { var dataType ValueType endOffset := offset // if string value if data[offset] == '"' { dataType = String if idx, _ := stringEnd(data[offset+1:]); idx != -1 { endOffset += idx + 1 } else { return nil, dataType, offset, MalformedStringError } } else if data[offset] == '[' { // if array value dataType = Array // break label, for stopping nested loops endOffset = blockEnd(data[offset:], '[', ']') if endOffset == -1 { return nil, dataType, offset, MalformedArrayError } endOffset += offset } else if data[offset] == '{' { // if object value dataType = Object // break label, for stopping nested loops endOffset = blockEnd(data[offset:], '{', '}') if endOffset == -1 { return nil, dataType, offset, MalformedObjectError } endOffset += offset } else { // Number, Boolean or None end := tokenEnd(data[endOffset:]) if end == -1 { return nil, dataType, offset, MalformedValueError } value := data[offset : endOffset+end] switch data[offset] { case 't', 'f': // true or false if bytes.Equal(value, trueLiteral) || bytes.Equal(value, falseLiteral) { dataType = Boolean } else { return nil, Unknown, offset, UnknownValueTypeError } case 'u', 'n': // undefined or null if bytes.Equal(value, nullLiteral) { dataType = Null } else { return nil, Unknown, offset, UnknownValueTypeError } case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': dataType = Number default: return nil, Unknown, offset, UnknownValueTypeError } endOffset += end } return data[offset:endOffset], dataType, endOffset, nil } /* Get - Receives data structure, and key path to extract 
value from. Returns: `value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null` `offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper. `err` - If key not found or any other parsing issue it should return error. If key not found it also sets `dataType` to `NotExist` Accept multiple keys to specify path to JSON value (in case of quering nested structures). If no keys provided it will try to extract closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation. */ func Get(data []byte, keys ...string) (value []byte, dataType ValueType, offset int, err error) { a, b, _, d, e := internalGet(data, keys...) return a, b, d, e } func internalGet(data []byte, keys ...string) (value []byte, dataType ValueType, offset, endOffset int, err error) { if len(keys) > 0 { if offset = searchKeys(data, keys...); offset == -1 { return nil, NotExist, -1, -1, KeyPathNotFoundError } } // Go to closest value nO := nextToken(data[offset:]) if nO == -1 { return nil, NotExist, offset, -1, MalformedJsonError } offset += nO value, dataType, endOffset, err = getType(data, offset) if err != nil { return value, dataType, offset, endOffset, err } // Strip quotes from string values if dataType == String { value = value[1 : len(value)-1] } return value[:len(value):len(value)], dataType, offset, endOffset, nil } // ArrayEach is used when iterating arrays, accepts a callback function with the same return arguments as `Get`. 
func ArrayEach(data []byte, cb func(value []byte, dataType ValueType, offset int, err error), keys ...string) (offset int, err error) { if len(data) == 0 { return -1, MalformedObjectError } nT := nextToken(data) if nT == -1 { return -1, MalformedJsonError } offset = nT + 1 if len(keys) > 0 { if offset = searchKeys(data, keys...); offset == -1 { return offset, KeyPathNotFoundError } // Go to closest value nO := nextToken(data[offset:]) if nO == -1 { return offset, MalformedJsonError } offset += nO if data[offset] != '[' { return offset, MalformedArrayError } offset++ } nO := nextToken(data[offset:]) if nO == -1 { return offset, MalformedJsonError } offset += nO if data[offset] == ']' { return offset, nil } for true { v, t, o, e := Get(data[offset:]) if e != nil { return offset, e } if o == 0 { break } if t != NotExist { cb(v, t, offset+o-len(v), e) } if e != nil { break } offset += o skipToToken := nextToken(data[offset:]) if skipToToken == -1 { return offset, MalformedArrayError } offset += skipToToken if data[offset] == ']' { break } if data[offset] != ',' { return offset, MalformedArrayError } offset++ } return offset, nil } // ObjectEach iterates over the key-value pairs of a JSON object, invoking a given callback for each such entry func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) { offset := 0 // Descend to the desired key, if requested if len(keys) > 0 { if off := searchKeys(data, keys...); off == -1 { return KeyPathNotFoundError } else { offset = off } } // Validate and skip past opening brace if off := nextToken(data[offset:]); off == -1 { return MalformedObjectError } else if offset += off; data[offset] != '{' { return MalformedObjectError } else { offset++ } // Skip to the first token inside the object, or stop if we find the ending brace if off := nextToken(data[offset:]); off == -1 { return MalformedJsonError } else if offset += off; data[offset] == '}' { return nil } 
// Loop pre-condition: data[offset] points to what should be either the next entry's key, or the closing brace (if it's anything else, the JSON is malformed) for offset < len(data) { // Step 1: find the next key var key []byte // Check what the the next token is: start of string, end of object, or something else (error) switch data[offset] { case '"': offset++ // accept as string and skip opening quote case '}': return nil // we found the end of the object; stop and return success default: return MalformedObjectError } // Find the end of the key string var keyEscaped bool if off, esc := stringEnd(data[offset:]); off == -1 { return MalformedJsonError } else { key, keyEscaped = data[offset:offset+off-1], esc offset += off } // Unescape the string if needed if keyEscaped { var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings if keyUnescaped, err := Unescape(key, stackbuf[:]); err != nil { return MalformedStringEscapeError } else { key = keyUnescaped } } // Step 2: skip the colon if off := nextToken(data[offset:]); off == -1 { return MalformedJsonError } else if offset += off; data[offset] != ':' { return MalformedJsonError } else { offset++ } // Step 3: find the associated value, then invoke the callback if value, valueType, off, err := Get(data[offset:]); err != nil { return err } else if err := callback(key, value, valueType, offset+off); err != nil { // Invoke the callback here! 
return err } else { offset += off } // Step 4: skip over the next comma to the following token, or stop if we hit the ending brace if off := nextToken(data[offset:]); off == -1 { return MalformedArrayError } else { offset += off switch data[offset] { case '}': return nil // Stop if we hit the close brace case ',': offset++ // Ignore the comma default: return MalformedObjectError } } // Skip to the next token after the comma if off := nextToken(data[offset:]); off == -1 { return MalformedArrayError } else { offset += off } } return MalformedObjectError // we shouldn't get here; it's expected that we will return via finding the ending brace } // GetUnsafeString returns the value retrieved by `Get`, use creates string without memory allocation by mapping string to slice memory. It does not handle escape symbols. func GetUnsafeString(data []byte, keys ...string) (val string, err error) { v, _, _, e := Get(data, keys...) if e != nil { return "", e } return bytesToString(&v), nil } // GetString returns the value retrieved by `Get`, cast to a string if possible, trying to properly handle escape and utf8 symbols // If key data type do not match, it will return an error. func GetString(data []byte, keys ...string) (val string, err error) { v, t, _, e := Get(data, keys...) if e != nil { return "", e } if t != String { return "", fmt.Errorf("Value is not a string: %s", string(v)) } // If no escapes return raw content if bytes.IndexByte(v, '\\') == -1 { return string(v), nil } return ParseString(v) } // GetFloat returns the value retrieved by `Get`, cast to a float64 if possible. // The offset is the same as in `Get`. // If key data type do not match, it will return an error. func GetFloat(data []byte, keys ...string) (val float64, err error) { v, t, _, e := Get(data, keys...) 
if e != nil { return 0, e } if t != Number { return 0, fmt.Errorf("Value is not a number: %s", string(v)) } return ParseFloat(v) } // GetInt returns the value retrieved by `Get`, cast to a int64 if possible. // If key data type do not match, it will return an error. func GetInt(data []byte, keys ...string) (val int64, err error) { v, t, _, e := Get(data, keys...) if e != nil { return 0, e } if t != Number { return 0, fmt.Errorf("Value is not a number: %s", string(v)) } return ParseInt(v) } // GetBoolean returns the value retrieved by `Get`, cast to a bool if possible. // The offset is the same as in `Get`. // If key data type do not match, it will return error. func GetBoolean(data []byte, keys ...string) (val bool, err error) { v, t, _, e := Get(data, keys...) if e != nil { return false, e } if t != Boolean { return false, fmt.Errorf("Value is not a boolean: %s", string(v)) } return ParseBoolean(v) } // ParseBoolean parses a Boolean ValueType into a Go bool (not particularly useful, but here for completeness) func ParseBoolean(b []byte) (bool, error) { switch { case bytes.Equal(b, trueLiteral): return true, nil case bytes.Equal(b, falseLiteral): return false, nil default: return false, MalformedValueError } } // ParseString parses a String ValueType into a Go string (the main parsing work is unescaping the JSON string) func ParseString(b []byte) (string, error) { var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings if bU, err := Unescape(b, stackbuf[:]); err != nil { return "", MalformedValueError } else { return string(bU), nil } } // ParseNumber parses a Number ValueType into a Go float64 func ParseFloat(b []byte) (float64, error) { if v, err := parseFloat(&b); err != nil { return 0, MalformedValueError } else { return v, nil } } // ParseInt parses a Number ValueType into a Go int64 func ParseInt(b []byte) (int64, error) { if v, ok, overflow := parseInt(b); !ok { if overflow { return 0, 
OverflowIntegerError } return 0, MalformedValueError } else { return v, nil } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/bytes_safe.go
vendor/github.com/buger/jsonparser/bytes_safe.go
// +build appengine appenginevm package jsonparser import ( "strconv" ) // See fastbytes_unsafe.go for explanation on why *[]byte is used (signatures must be consistent with those in that file) func equalStr(b *[]byte, s string) bool { return string(*b) == s } func parseFloat(b *[]byte) (float64, error) { return strconv.ParseFloat(string(*b), 64) } func bytesToString(b *[]byte) string { return string(*b) } func StringToBytes(s string) []byte { return []byte(s) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/bytes.go
vendor/github.com/buger/jsonparser/bytes.go
package jsonparser import ( bio "bytes" ) // minInt64 '-9223372036854775808' is the smallest representable number in int64 const minInt64 = `9223372036854775808` // About 2x faster then strconv.ParseInt because it only supports base 10, which is enough for JSON func parseInt(bytes []byte) (v int64, ok bool, overflow bool) { if len(bytes) == 0 { return 0, false, false } var neg bool = false if bytes[0] == '-' { neg = true bytes = bytes[1:] } var b int64 = 0 for _, c := range bytes { if c >= '0' && c <= '9' { b = (10 * v) + int64(c-'0') } else { return 0, false, false } if overflow = (b < v); overflow { break } v = b } if overflow { if neg && bio.Equal(bytes, []byte(minInt64)) { return b, true, false } return 0, false, true } if neg { return -v, true, false } else { return v, true, false } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/escape.go
vendor/github.com/buger/jsonparser/escape.go
package jsonparser import ( "bytes" "unicode/utf8" ) // JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7 const supplementalPlanesOffset = 0x10000 const highSurrogateOffset = 0xD800 const lowSurrogateOffset = 0xDC00 const basicMultilingualPlaneReservedOffset = 0xDFFF const basicMultilingualPlaneOffset = 0xFFFF func combineUTF16Surrogates(high, low rune) rune { return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset) } const badHex = -1 func h2I(c byte) int { switch { case c >= '0' && c <= '9': return int(c - '0') case c >= 'A' && c <= 'F': return int(c - 'A' + 10) case c >= 'a' && c <= 'f': return int(c - 'a' + 10) } return badHex } // decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and // is not checked. // In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together. // This function only handles one; decodeUnicodeEscape handles this more complex case. func decodeSingleUnicodeEscape(in []byte) (rune, bool) { // We need at least 6 characters total if len(in) < 6 { return utf8.RuneError, false } // Convert hex to decimal h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5]) if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex { return utf8.RuneError, false } // Compose the hex digits return rune(h1<<12 + h2<<8 + h3<<4 + h4), true } // isUTF16EncodedRune checks if a rune is in the range for non-BMP characters, // which is used to describe UTF16 chars. 
// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane func isUTF16EncodedRune(r rune) bool { return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset } func decodeUnicodeEscape(in []byte) (rune, int) { if r, ok := decodeSingleUnicodeEscape(in); !ok { // Invalid Unicode escape return utf8.RuneError, -1 } else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) { // Valid Unicode escape in Basic Multilingual Plane return r, 6 } else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain // UTF16 "high surrogate" without manditory valid following Unicode escape for the "low surrogate" return utf8.RuneError, -1 } else if r2 < lowSurrogateOffset { // Invalid UTF16 "low surrogate" return utf8.RuneError, -1 } else { // Valid UTF16 surrogate pair return combineUTF16Surrogates(r, r2), 12 } } // backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X] var backslashCharEscapeTable = [...]byte{ '"': '"', '\\': '\\', '/': '/', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', } // unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns // how many characters were consumed from 'in' and emitted into 'out'. // If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) to signal the error. 
func unescapeToUTF8(in, out []byte) (inLen int, outLen int) { if len(in) < 2 || in[0] != '\\' { // Invalid escape due to insufficient characters for any escape or no initial backslash return -1, -1 } // https://tools.ietf.org/html/rfc7159#section-7 switch e := in[1]; e { case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': // Valid basic 2-character escapes (use lookup table) out[0] = backslashCharEscapeTable[e] return 2, 1 case 'u': // Unicode escape if r, inLen := decodeUnicodeEscape(in); inLen == -1 { // Invalid Unicode escape return -1, -1 } else { // Valid Unicode escape; re-encode as UTF8 outLen := utf8.EncodeRune(out, r) return inLen, outLen } } return -1, -1 } // unescape unescapes the string contained in 'in' and returns it as a slice. // If 'in' contains no escaped characters: // Returns 'in'. // Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)): // 'out' is used to build the unescaped string and is returned with no extra allocation // Else: // A new slice is allocated and returned. 
func Unescape(in, out []byte) ([]byte, error) { firstBackslash := bytes.IndexByte(in, '\\') if firstBackslash == -1 { return in, nil } // Get a buffer of sufficient size (allocate if needed) if cap(out) < len(in) { out = make([]byte, len(in)) } else { out = out[0:len(in)] } // Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice) copy(out, in[:firstBackslash]) in = in[firstBackslash:] buf := out[firstBackslash:] for len(in) > 0 { // Unescape the next escaped character inLen, bufLen := unescapeToUTF8(in, buf) if inLen == -1 { return nil, MalformedStringEscapeError } in = in[inLen:] buf = buf[bufLen:] // Copy everything up until the next backslash nextBackslash := bytes.IndexByte(in, '\\') if nextBackslash == -1 { copy(buf, in) buf = buf[len(in):] break } else { copy(buf, in[:nextBackslash]) buf = buf[nextBackslash:] in = in[nextBackslash:] } } // Trim the out buffer to the amount that was actually emitted return out[:len(out)-len(buf)], nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/buger/jsonparser/bytes_unsafe.go
vendor/github.com/buger/jsonparser/bytes_unsafe.go
// +build !appengine,!appenginevm package jsonparser import ( "reflect" "strconv" "unsafe" "runtime" ) // // The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6, // the compiler cannot perfectly inline the function when using a non-pointer slice. That is, // the non-pointer []byte parameter version is slower than if its function body is manually // inlined, whereas the pointer []byte version is equally fast to the manually inlined // version. Instruction count in assembly taken from "go tool compile" confirms this difference. // // TODO: Remove hack after Go 1.7 release // func equalStr(b *[]byte, s string) bool { return *(*string)(unsafe.Pointer(b)) == s } func parseFloat(b *[]byte) (float64, error) { return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64) } // A hack until issue golang/go#2632 is fixed. // See: https://github.com/golang/go/issues/2632 func bytesToString(b *[]byte) string { return *(*string)(unsafe.Pointer(b)) } func StringToBytes(s string) []byte { b := make([]byte, 0, 0) bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) bh.Data = sh.Data bh.Cap = sh.Len bh.Len = sh.Len runtime.KeepAlive(s) return b }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
//go:build windows

// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
// NOTE(review): edit the //sys declarations in the hand-written files and
// regenerate rather than changing this file directly.

package winio

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

// Keep unsafe imported even if no stub below happens to need it.
var _ unsafe.Pointer

// Do the interface allocations only once for common
// Errno values.
const (
	errnoERROR_IO_PENDING = 997
)

var (
	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
	errERROR_EINVAL     error = syscall.EINVAL
)

// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
	switch e {
	case 0:
		return errERROR_EINVAL
	case errnoERROR_IO_PENDING:
		return errERROR_IO_PENDING
	}
	return e
}

// Lazily-loaded DLLs and the procedures resolved from them.
var (
	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
	modntdll    = windows.NewLazySystemDLL("ntdll.dll")
	modws2_32   = windows.NewLazySystemDLL("ws2_32.dll")

	procAdjustTokenPrivileges              = modadvapi32.NewProc("AdjustTokenPrivileges")
	procConvertSidToStringSidW             = modadvapi32.NewProc("ConvertSidToStringSidW")
	procConvertStringSidToSidW             = modadvapi32.NewProc("ConvertStringSidToSidW")
	procImpersonateSelf                    = modadvapi32.NewProc("ImpersonateSelf")
	procLookupAccountNameW                 = modadvapi32.NewProc("LookupAccountNameW")
	procLookupAccountSidW                  = modadvapi32.NewProc("LookupAccountSidW")
	procLookupPrivilegeDisplayNameW        = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
	procLookupPrivilegeNameW               = modadvapi32.NewProc("LookupPrivilegeNameW")
	procLookupPrivilegeValueW              = modadvapi32.NewProc("LookupPrivilegeValueW")
	procOpenThreadToken                    = modadvapi32.NewProc("OpenThreadToken")
	procRevertToSelf                       = modadvapi32.NewProc("RevertToSelf")
	procBackupRead                         = modkernel32.NewProc("BackupRead")
	procBackupWrite                        = modkernel32.NewProc("BackupWrite")
	procCancelIoEx                         = modkernel32.NewProc("CancelIoEx")
	procConnectNamedPipe                   = modkernel32.NewProc("ConnectNamedPipe")
	procCreateIoCompletionPort             = modkernel32.NewProc("CreateIoCompletionPort")
	procCreateNamedPipeW                   = modkernel32.NewProc("CreateNamedPipeW")
	procDisconnectNamedPipe                = modkernel32.NewProc("DisconnectNamedPipe")
	procGetCurrentThread                   = modkernel32.NewProc("GetCurrentThread")
	procGetNamedPipeHandleStateW           = modkernel32.NewProc("GetNamedPipeHandleStateW")
	procGetNamedPipeInfo                   = modkernel32.NewProc("GetNamedPipeInfo")
	procGetQueuedCompletionStatus          = modkernel32.NewProc("GetQueuedCompletionStatus")
	procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
	procNtCreateNamedPipeFile              = modntdll.NewProc("NtCreateNamedPipeFile")
	procRtlDefaultNpAcl                    = modntdll.NewProc("RtlDefaultNpAcl")
	procRtlDosPathNameToNtPathName_U       = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
	procRtlNtStatusToDosErrorNoTeb         = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
	procWSAGetOverlappedResult             = modws2_32.NewProc("WSAGetOverlappedResult")
)

// advapi32.dll wrappers.

func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
	var _p0 uint32
	if releaseAll {
		_p0 = 1
	}
	r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
	success = r0 != 0
	if true {
		err = errnoErr(e1)
	}
	return
}

func convertSidToStringSid(sid *byte, str **uint16) (err error) {
	r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func convertStringSidToSid(str *uint16, sid **byte) (err error) {
	r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func impersonateSelf(level uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(accountName)
	if err != nil {
		return
	}
	return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
}

func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}

func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeName(_p0, luid, buffer, size)
}

func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	var _p1 *uint16
	_p1, err = syscall.UTF16PtrFromString(name)
	if err != nil {
		return
	}
	return _lookupPrivilegeValue(_p0, _p1, luid)
}

func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
	var _p0 uint32
	if openAsSelf {
		_p0 = 1
	}
	r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func revertToSelf() (err error) {
	r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

// kernel32.dll wrappers.

func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
	}
	var _p1 uint32
	if abort {
		_p1 = 1
	}
	var _p2 uint32
	if processSecurity {
		_p2 = 1
	}
	r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
	}
	var _p1 uint32
	if abort {
		_p1 = 1
	}
	var _p2 uint32
	if processSecurity {
		_p2 = 1
	}
	r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
	r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
	r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
	r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
	newport = windows.Handle(r0)
	if newport == 0 {
		err = errnoErr(e1)
	}
	return
}

func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(name)
	if err != nil {
		return
	}
	return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
}

func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
	r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
	handle = windows.Handle(r0)
	if handle == windows.InvalidHandle {
		err = errnoErr(e1)
	}
	return
}

func disconnectNamedPipe(pipe windows.Handle) (err error) {
	r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func getCurrentThread() (h windows.Handle) {
	r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
	h = windows.Handle(r0)
	return
}

func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
	r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
	r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}

// ntdll.dll wrappers.

func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
	status = ntStatus(r0)
	return
}

func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
	status = ntStatus(r0)
	return
}

func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
	status = ntStatus(r0)
	return
}

func rtlNtStatusToDosError(status ntStatus) (winerr error) {
	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
	if r0 != 0 {
		winerr = syscall.Errno(r0)
	}
	return
}

// ws2_32.dll wrappers.

func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
	var _p0 uint32
	if wait {
		_p0 = 1
	}
	r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
	if r1 == 0 {
		err = errnoErr(e1)
	}
	return
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/file.go
vendor/github.com/Microsoft/go-winio/file.go
//go:build windows
// +build windows

package winio

import (
	"errors"
	"io"
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"golang.org/x/sys/windows"
)

//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult

var (
	// ErrFileClosed is returned when I/O is attempted on a closed win32File.
	ErrFileClosed = errors.New("file has already been closed")
	// ErrTimeout is returned when a read/write deadline expires; it satisfies
	// net.Error-style Timeout()/Temporary() checks via timeoutError.
	ErrTimeout = &timeoutError{}
)

// timeoutError implements the Error/Timeout/Temporary triple so callers can
// treat deadline expiry like a network timeout.
type timeoutError struct{}

func (*timeoutError) Error() string   { return "i/o timeout" }
func (*timeoutError) Timeout() bool   { return true }
func (*timeoutError) Temporary() bool { return true }

// timeoutChan is closed when a deadline fires.
type timeoutChan chan struct{}

// Process-wide completion port, created lazily by initIO.
var ioInitOnce sync.Once
var ioCompletionPort windows.Handle

// ioResult contains the result of an asynchronous IO operation.
type ioResult struct {
	bytes uint32
	err   error
}

// ioOperation represents an outstanding asynchronous Win32 IO.
type ioOperation struct {
	o  windows.Overlapped
	ch chan ioResult
}

// initIO creates the shared completion port and starts the goroutine that
// dispatches completions. Called exactly once via ioInitOnce.
func initIO() {
	h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
	if err != nil {
		panic(err)
	}
	ioCompletionPort = h
	go ioCompletionProcessor(h)
}

// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	handle        windows.Handle
	wg            sync.WaitGroup // tracks in-flight I/O; Close waits on it
	wgLock        sync.RWMutex   // orders wg.Add against the closing flag
	closing       atomic.Bool
	socket        bool
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}

// deadlineHandler implements one direction's deadline: a timer that closes
// `channel` when it fires, observed by in-flight asyncIO calls.
type deadlineHandler struct {
	setLock     sync.Mutex // serializes calls to set
	channel     timeoutChan
	channelLock sync.RWMutex // guards replacement of channel
	timer       *time.Timer
	timedout    atomic.Bool
}

// makeWin32File makes a new win32File from an existing file handle.
func makeWin32File(h windows.Handle) (*win32File, error) {
	f := &win32File{handle: h}
	ioInitOnce.Do(initIO)
	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
	if err != nil {
		return nil, err
	}
	// Skip completion-port posts for synchronously-completed I/O so the fast
	// path avoids a round trip through the processor goroutine.
	err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
	if err != nil {
		return nil, err
	}
	f.readDeadline.channel = make(timeoutChan)
	f.writeDeadline.channel = make(timeoutChan)
	return f, nil
}

// Deprecated: use NewOpenFile instead.
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return NewOpenFile(windows.Handle(h))
}

// NewOpenFile wraps an existing Win32 handle in an io.ReadWriteCloser that
// performs non-blocking overlapped I/O. Takes ownership of the handle.
func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
	// If we return the result of makeWin32File directly, it can result in an
	// interface-wrapped nil, rather than a nil interface value.
	f, err := makeWin32File(h)
	if err != nil {
		return nil, err
	}
	return f, nil
}

// closeHandle closes the resources associated with a Win32 handle.
func (f *win32File) closeHandle() {
	f.wgLock.Lock()
	// Atomically set that we are closing, releasing the resources only once.
	if !f.closing.Swap(true) {
		f.wgLock.Unlock()
		// cancel all IO and wait for it to complete
		_ = cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		windows.Close(f.handle)
		f.handle = 0
	} else {
		f.wgLock.Unlock()
	}
}

// Close closes a win32File.
func (f *win32File) Close() error {
	f.closeHandle()
	return nil
}

// IsClosed checks if the file has been closed.
func (f *win32File) IsClosed() bool {
	return f.closing.Load()
}

// prepareIO prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIO() (*ioOperation, error) {
	f.wgLock.RLock()
	if f.closing.Load() {
		f.wgLock.RUnlock()
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	f.wgLock.RUnlock()
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
}

// ioCompletionProcessor processes completed async IOs forever.
func ioCompletionProcessor(h windows.Handle) {
	for {
		var bytes uint32
		var key uintptr
		var op *ioOperation
		err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
		if op == nil {
			panic(err)
		}
		op.ch <- ioResult{bytes, err}
	}
}

// todo: helsaawy - create an asyncIO version that takes a context

// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
	if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
		// Completed (or failed) synchronously; no completion will be posted.
		return int(bytes), err
	}

	if f.closing.Load() {
		_ = cancelIoEx(f.handle, &c.o)
	}

	var timeout timeoutChan
	if d != nil {
		d.channelLock.Lock()
		timeout = d.channel
		d.channelLock.Unlock()
	}

	var r ioResult
	select {
	case r = <-c.ch:
		err = r.err
		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
			if f.closing.Load() {
				err = ErrFileClosed
			}
		} else if err != nil && f.socket {
			// err is from Win32. Query the overlapped structure to get the winsock error.
			var bytes, flags uint32
			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
		}
	case <-timeout:
		// Deadline fired first: cancel the I/O, then still wait for the
		// completion so the kernel is done touching c before we return.
		_ = cancelIoEx(f.handle, &c.o)
		r = <-c.ch
		err = r.err
		if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
			err = ErrTimeout
		}
	}

	// runtime.KeepAlive is needed, as c is passed via native
	// code to ioCompletionProcessor, c must remain alive
	// until the channel read is complete.
	// todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
	runtime.KeepAlive(c)
	return int(r.bytes), err
}

// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
	c, err := f.prepareIO()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.readDeadline.timedout.Load() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = windows.ReadFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
	runtime.KeepAlive(b)

	// Handle EOF conditions.
	if err == nil && n == 0 && len(b) != 0 {
		return 0, io.EOF
	} else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
		return 0, io.EOF
	}
	return n, err
}

// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
	c, err := f.prepareIO()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.writeDeadline.timedout.Load() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = windows.WriteFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
	runtime.KeepAlive(b)
	return n, err
}

// SetReadDeadline sets the deadline for future Read calls.
func (f *win32File) SetReadDeadline(deadline time.Time) error {
	return f.readDeadline.set(deadline)
}

// SetWriteDeadline sets the deadline for future Write calls.
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
	return f.writeDeadline.set(deadline)
}

// Flush flushes the handle's buffers to the underlying device.
func (f *win32File) Flush() error {
	return windows.FlushFileBuffers(f.handle)
}

// Fd returns the underlying Win32 handle as a uintptr.
func (f *win32File) Fd() uintptr {
	return uintptr(f.handle)
}

// set (re)arms the deadline: it stops any previous timer, resets the
// timed-out state, replaces an already-closed channel, and either starts a
// new timer or fires immediately when the deadline is already past.
func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()

	if d.timer != nil {
		if !d.timer.Stop() {
			<-d.channel
		}
		d.timer = nil
	}
	d.timedout.Store(false)

	select {
	case <-d.channel:
		// Previous deadline already fired and closed the channel; make a
		// fresh one so future waiters can block on it.
		d.channelLock.Lock()
		d.channel = make(chan struct{})
		d.channelLock.Unlock()
	default:
	}

	if deadline.IsZero() {
		return nil
	}

	timeoutIO := func() {
		d.timedout.Store(true)
		close(d.channel)
	}

	now := time.Now()
	duration := deadline.Sub(now)
	if deadline.After(now) {
		// Deadline is in the future, set a timer to wait
		d.timer = time.AfterFunc(duration, timeoutIO)
	} else {
		// Deadline is in the past. Cancel all pending IO now.
		timeoutIO()
	}
	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/sd.go
vendor/github.com/Microsoft/go-winio/sd.go
//go:build windows
// +build windows

package winio

import (
	"errors"
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW

// AccountLookupError wraps a Win32 error from an account name/SID lookup,
// carrying the name (or SID string) that was being resolved.
type AccountLookupError struct {
	Name string
	Err  error
}

func (e *AccountLookupError) Error() string {
	if e.Name == "" {
		return "lookup account: empty account name specified"
	}
	var s string
	switch {
	case errors.Is(e.Err, windows.ERROR_INVALID_SID):
		s = "the security ID structure is invalid"
	case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
		s = "not found"
	default:
		s = e.Err.Error()
	}
	return "lookup account " + e.Name + ": " + s
}

func (e *AccountLookupError) Unwrap() error { return e.Err }

// SddlConversionError wraps an error from converting between SDDL strings
// and binary security descriptors.
type SddlConversionError struct {
	Sddl string
	Err  error
}

func (e *SddlConversionError) Error() string {
	return "convert " + e.Sddl + ": " + e.Err.Error()
}

func (e *SddlConversionError) Unwrap() error { return e.Err }

// LookupSidByName looks up the SID of an account by name
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
	if name == "" {
		return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
	}

	// First call with nil buffers obtains the required sizes
	// (expected to fail with ERROR_INSUFFICIENT_BUFFER).
	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
		return "", &AccountLookupError{name, err}
	}
	sidBuffer := make([]byte, sidSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	var strBuffer *uint16
	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	// strBuffer is LocalAlloc'd by Windows; copy it out and free it.
	sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
	_, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
	return sid, nil
}

// LookupNameBySid looks up the name of an account by SID
//
//revive:disable-next-line:var-naming SID, not Sid
func LookupNameBySid(sid string) (name string, err error) {
	if sid == "" {
		return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
	}

	sidBuffer, err := windows.UTF16PtrFromString(sid)
	if err != nil {
		return "", &AccountLookupError{sid, err}
	}

	var sidPtr *byte
	if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
		return "", &AccountLookupError{sid, err}
	}
	defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck

	// Size query first, then the real lookup (same two-call pattern as above).
	var nameSize, refDomainSize, sidNameUse uint32
	err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
		return "", &AccountLookupError{sid, err}
	}

	nameBuffer := make([]uint16, nameSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{sid, err}
	}

	name = windows.UTF16ToString(nameBuffer)
	return name, nil
}

// SddlToSecurityDescriptor converts an SDDL string into a self-relative
// binary security descriptor.
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
	sd, err := windows.SecurityDescriptorFromString(sddl)
	if err != nil {
		return nil, &SddlConversionError{Sddl: sddl, Err: err}
	}
	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
	return b, nil
}

// SecurityDescriptorToSddl converts a binary security descriptor back into
// its SDDL string form, validating a minimum length first.
func SecurityDescriptorToSddl(sd []byte) (string, error) {
	if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
		return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
	}
	s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
	return s.String(), nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/privilege.go
vendor/github.com/Microsoft/go-winio/privilege.go
//go:build windows
// +build windows

package winio

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"runtime"
	"sync"
	"unicode/utf16"

	"golang.org/x/sys/windows"
)

//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
//sys revertToSelf() (err error) = advapi32.RevertToSelf
//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW

const (
	//revive:disable-next-line:var-naming ALL_CAPS
	SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED

	//revive:disable-next-line:var-naming ALL_CAPS
	ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED

	SeBackupPrivilege   = "SeBackupPrivilege"
	SeRestorePrivilege  = "SeRestorePrivilege"
	SeSecurityPrivilege = "SeSecurityPrivilege"
)

var (
	// privNames caches privilege name -> LUID lookups; guarded by privNameMutex.
	privNames     = make(map[string]uint64)
	privNameMutex sync.Mutex
)

// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
	privileges []uint64
}

func (e *PrivilegeError) Error() string {
	s := "Could not enable privilege "
	if len(e.privileges) > 1 {
		s = "Could not enable privileges "
	}
	for i, p := range e.privileges {
		if i != 0 {
			s += ", "
		}
		s += `"`
		s += getPrivilegeName(p)
		s += `"`
	}
	return s
}

// RunWithPrivilege enables a single privilege for a function call.
func RunWithPrivilege(name string, fn func() error) error {
	return RunWithPrivileges([]string{name}, fn)
}

// RunWithPrivileges enables privileges for a function call.
// The OS thread is locked because the privilege adjustment applies to the
// thread's impersonation token, which must stay paired with fn's execution.
func RunWithPrivileges(names []string, fn func() error) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	token, err := newThreadToken()
	if err != nil {
		return err
	}
	defer releaseThreadToken(token)
	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
	if err != nil {
		return err
	}
	return fn()
}

// mapPrivileges resolves privilege names to LUIDs, consulting and filling
// the privNames cache under privNameMutex.
func mapPrivileges(names []string) ([]uint64, error) {
	privileges := make([]uint64, 0, len(names))
	privNameMutex.Lock()
	defer privNameMutex.Unlock()
	for _, name := range names {
		p, ok := privNames[name]
		if !ok {
			err := lookupPrivilegeValue("", name, &p)
			if err != nil {
				return nil, err
			}
			privNames[name] = p
		}
		privileges = append(privileges, p)
	}
	return privileges, nil
}

// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}

// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
	// An action of 0 (no SE_PRIVILEGE_* attribute bits) disables the privileges.
	return enableDisableProcessPrivilege(names, 0)
}

// enableDisableProcessPrivilege applies action to the named privileges on the
// current process token.
func enableDisableProcessPrivilege(names []string, action uint32) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}

	p := windows.CurrentProcess()
	var token windows.Token
	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
	if err != nil {
		return err
	}

	defer token.Close()
	return adjustPrivileges(token, privileges, action)
}

// adjustPrivileges applies action (e.g. SE_PRIVILEGE_ENABLED, or 0 to disable)
// to every privilege LUID on the given token via AdjustTokenPrivileges.
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
	// Serialize a TOKEN_PRIVILEGES structure by hand: a uint32 count followed
	// by {LUID, attributes} pairs, little-endian.
	var b bytes.Buffer
	_ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
	for _, p := range privileges {
		_ = binary.Write(&b, binary.LittleEndian, p)
		_ = binary.Write(&b, binary.LittleEndian, action)
	}
	prevState := make([]byte, b.Len())
	reqSize := uint32(0)
	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
	if !success {
		return err
	}
	// AdjustTokenPrivileges can "succeed" while assigning only some of the
	// requested privileges; surface that case as a PrivilegeError.
	if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
		return &PrivilegeError{privileges}
	}
	return nil
}

// getPrivilegeName returns a human-readable display name for a privilege
// LUID, falling back to the internal name, then to a placeholder, on failure.
func getPrivilegeName(luid uint64) string {
	var nameBuffer [256]uint16
	bufSize := uint32(len(nameBuffer))
	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %d>", luid)
	}

	var displayNameBuffer [256]uint16
	displayBufSize := uint32(len(displayNameBuffer))
	var langID uint32
	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
	}

	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}

// newThreadToken starts self-impersonation on the current thread and opens
// the resulting thread token with rights sufficient for adjustPrivileges.
// Callers must hold the OS thread (runtime.LockOSThread) and release the
// token with releaseThreadToken.
func newThreadToken() (windows.Token, error) {
	err := impersonateSelf(windows.SecurityImpersonation)
	if err != nil {
		return 0, err
	}

	var token windows.Token
	err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
	if err != nil {
		// Undo the impersonation before reporting the failure. A failed
		// RevertToSelf leaves the thread in an unusable security context,
		// hence the panic.
		rerr := revertToSelf()
		if rerr != nil {
			panic(rerr)
		}
		return 0, err
	}
	return token, nil
}

// releaseThreadToken ends impersonation and closes the thread token obtained
// by newThreadToken.
func releaseThreadToken(h windows.Token) {
	err := revertToSelf()
	if err != nil {
		panic(err)
	}
	h.Close()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/hvsock.go
vendor/github.com/Microsoft/go-winio/hvsock.go
//go:build windows
// +build windows

package winio

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"github.com/Microsoft/go-winio/internal/socket"
	"github.com/Microsoft/go-winio/pkg/guid"
)

// afHVSock is the Hyper-V socket address family constant.
const afHVSock = 34 // AF_HYPERV

// Well known Service and VM IDs
// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards

// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
	return guid.GUID{}
}

// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
	return guid.GUID{
		Data1: 0xffffffff,
		Data2: 0xffff,
		Data3: 0xffff,
		Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	}
}

// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
	return guid.GUID{
		Data1: 0xe0e16197,
		Data2: 0xdd56,
		Data3: 0x4a10,
		Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
	}
}

// HvsockGUIDSiloHost is the address of a silo's host partition:
//   - The silo host of a hosted silo is the utility VM.
//   - The silo host of a silo on a physical host is the physical host.
func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
	return guid.GUID{
		Data1: 0x36bd0c5c,
		Data2: 0x7276,
		Data3: 0x4223,
		Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
	}
}

// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
	return guid.GUID{
		Data1: 0x90db8b89,
		Data2: 0xd35,
		Data3: 0x4f79,
		Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
	}
}

// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
// Listening on this VmId accepts connection from:
//   - Inside silos: silo host partition.
//   - Inside hosted silo: host of the VM.
//   - Inside VM: VM host.
//   - Physical host: Not supported.
func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
	return guid.GUID{
		Data1: 0xa42e7cda,
		Data2: 0xd03f,
		Data3: 0x480c,
		Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
	}
}

// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
// VsockServiceID fills Data1 with the AF_VSOCK port.
func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
	return guid.GUID{
		Data2: 0xfacb,
		Data3: 0x11e6,
		Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
	}
}

// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
	VMID      guid.GUID
	ServiceID guid.GUID
}

// rawHvsockAddr is the in-memory layout passed to/received from the socket
// syscalls (family word, 2 bytes of padding, then the two GUIDs).
type rawHvsockAddr struct {
	Family    uint16
	_         uint16
	VMID      guid.GUID
	ServiceID guid.GUID
}

// Compile-time interface check.
var _ socket.RawSockaddr = &rawHvsockAddr{}

// Network returns the address's network name, "hvsock".
func (*HvsockAddr) Network() string {
	return "hvsock"
}

// String renders the address as "<VMID>:<ServiceID>".
func (addr *HvsockAddr) String() string {
	return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}

// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
	g := hvsockVsockServiceTemplate() // make a copy
	g.Data1 = port
	return g
}

// raw converts the address to its syscall representation.
func (addr *HvsockAddr) raw() rawHvsockAddr {
	return rawHvsockAddr{
		Family:    afHVSock,
		VMID:      addr.VMID,
		ServiceID: addr.ServiceID,
	}
}

// fromRaw populates the address from its syscall representation.
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
	addr.VMID = raw.VMID
	addr.ServiceID = raw.ServiceID
}

// Sockaddr returns a pointer to and the size of this struct.
// // Implements the [socket.RawSockaddr] interface, and allows use in // [socket.Bind] and [socket.ConnectEx]. func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil } // Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. func (r *rawHvsockAddr) FromBytes(b []byte) error { n := int(unsafe.Sizeof(rawHvsockAddr{})) if len(b) < n { return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) } copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) if r.Family != afHVSock { return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) } return nil } // HvsockListener is a socket listener for the AF_HYPERV address family. type HvsockListener struct { sock *win32File addr HvsockAddr } var _ net.Listener = &HvsockListener{} // HvsockConn is a connected socket of the AF_HYPERV address family. type HvsockConn struct { sock *win32File local, remote HvsockAddr } var _ net.Conn = &HvsockConn{} func newHVSocket() (*win32File, error) { fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } f, err := makeWin32File(fd) if err != nil { windows.Close(fd) return nil, err } f.socket = true return f, nil } // ListenHvsock listens for connections on the specified hvsock address. 
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} var sock *win32File sock, err = newHVSocket() if err != nil { return nil, l.opErr("listen", err) } defer func() { if err != nil { _ = sock.Close() } }() sa := addr.raw() err = socket.Bind(sock.handle, &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } err = windows.Listen(sock.handle, 16) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("listen", err)) } return &HvsockListener{sock: sock, addr: *addr}, nil } func (l *HvsockListener) opErr(op string, err error) error { return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} } // Addr returns the listener's network address. func (l *HvsockListener) Addr() net.Addr { return &l.addr } // Accept waits for the next connection and returns it. func (l *HvsockListener) Accept() (_ net.Conn, err error) { sock, err := newHVSocket() if err != nil { return nil, l.opErr("accept", err) } defer func() { if sock != nil { sock.Close() } }() c, err := l.sock.prepareIO() if err != nil { return nil, l.opErr("accept", err) } defer l.sock.wg.Done() // AcceptEx, per documentation, requires an extra 16 bytes per address. // // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) var addrbuf [addrlen * 2]byte var bytes uint32 err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } conn := &HvsockConn{ sock: sock, } // The local address returned in the AcceptEx buffer is the same as the Listener socket's // address. 
However, the service GUID reported by GetSockName is different from the Listeners // socket, and is sometimes the same as the local address of the socket that dialed the // address, with the service GUID.Data1 incremented, but othertimes is different. // todo: does the local address matter? is the listener's address or the actual address appropriate? conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) // initialize the accepted socket and update its properties with those of the listening socket if err = windows.Setsockopt(sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) } sock = nil return conn, nil } // Close closes the listener, causing any pending Accept calls to fail. func (l *HvsockListener) Close() error { return l.sock.Close() } // HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). type HvsockDialer struct { // Deadline is the time the Dial operation must connect before erroring. Deadline time.Time // Retries is the number of additional connects to try if the connection times out, is refused, // or the host is unreachable Retries uint // RetryWait is the time to wait after a connection error to retry RetryWait time.Duration rt *time.Timer // redial wait timer } // Dial the Hyper-V socket at addr. // // See [HvsockDialer.Dial] for more information. func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { return (&HvsockDialer{}).Dial(ctx, addr) } // Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. // Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between // retries. // // Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. 
func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { op := "dial" // create the conn early to use opErr() conn = &HvsockConn{ remote: *addr, } if !d.Deadline.IsZero() { var cancel context.CancelFunc ctx, cancel = context.WithDeadline(ctx, d.Deadline) defer cancel() } // preemptive timeout/cancellation check if err = ctx.Err(); err != nil { return nil, conn.opErr(op, err) } sock, err := newHVSocket() if err != nil { return nil, conn.opErr(op, err) } defer func() { if sock != nil { sock.Close() } }() sa := addr.raw() err = socket.Bind(sock.handle, &sa) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("bind", err)) } c, err := sock.prepareIO() if err != nil { return nil, conn.opErr(op, err) } defer sock.wg.Done() var bytes uint32 for i := uint(0); i <= d.Retries; i++ { err = socket.ConnectEx( sock.handle, &sa, nil, // sendBuf 0, // sendDataLen &bytes, (*windows.Overlapped)(unsafe.Pointer(&c.o))) _, err = sock.asyncIO(c, nil, bytes, err) if i < d.Retries && canRedial(err) { if err = d.redialWait(ctx); err == nil { continue } } break } if err != nil { return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) } // update the connection properties, so shutdown can be used if err = windows.Setsockopt( sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_CONNECT_CONTEXT, nil, // optvalue 0, // optlen ); err != nil { return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) } // get the local name var sal rawHvsockAddr err = socket.GetSockName(sock.handle, &sal) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) } conn.local.fromRaw(&sal) // one last check for timeout, since asyncIO doesn't check the context if err = ctx.Err(); err != nil { return nil, conn.opErr(op, err) } conn.sock = sock sock = nil return conn, nil } // redialWait waits before attempting to redial, resetting the timer as appropriate. 
func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { if d.RetryWait == 0 { return nil } if d.rt == nil { d.rt = time.NewTimer(d.RetryWait) } else { // should already be stopped and drained d.rt.Reset(d.RetryWait) } select { case <-ctx.Done(): case <-d.rt.C: return nil } // stop and drain the timer if !d.rt.Stop() { <-d.rt.C } return ctx.Err() } // assumes error is a plain, unwrapped windows.Errno provided by direct syscall. func canRedial(err error) bool { //nolint:errorlint // guaranteed to be an Errno switch err { case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: return true default: return false } } func (conn *HvsockConn) opErr(op string, err error) error { // translate from "file closed" to "socket closed" if errors.Is(err, ErrFileClosed) { err = socket.ErrSocketClosed } return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} } func (conn *HvsockConn) Read(b []byte) (int, error) { c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("read", err) } defer conn.sock.wg.Done() buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { var eno windows.Errno if errors.As(err, &eno) { err = os.NewSyscallError("wsarecv", eno) } return 0, conn.opErr("read", err) } else if n == 0 { err = io.EOF } return n, err } func (conn *HvsockConn) Write(b []byte) (int, error) { t := 0 for len(b) != 0 { n, err := conn.write(b) if err != nil { return t + n, err } t += n b = b[n:] } return t, nil } func (conn *HvsockConn) write(b []byte) (int, error) { c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("write", err) } defer conn.sock.wg.Done() buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 err = 
windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { var eno windows.Errno if errors.As(err, &eno) { err = os.NewSyscallError("wsasend", eno) } return 0, conn.opErr("write", err) } return n, err } // Close closes the socket connection, failing any pending read or write calls. func (conn *HvsockConn) Close() error { return conn.sock.Close() } func (conn *HvsockConn) IsClosed() bool { return conn.sock.IsClosed() } // shutdown disables sending or receiving on a socket. func (conn *HvsockConn) shutdown(how int) error { if conn.IsClosed() { return socket.ErrSocketClosed } err := windows.Shutdown(conn.sock.handle, how) if err != nil { // If the connection was closed, shutdowns fail with "not connected" if errors.Is(err, windows.WSAENOTCONN) || errors.Is(err, windows.WSAESHUTDOWN) { err = socket.ErrSocketClosed } return os.NewSyscallError("shutdown", err) } return nil } // CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(windows.SHUT_RD) if err != nil { return conn.opErr("closeread", err) } return nil } // CloseWrite shuts down the write end of the socket, preventing future write operations and // notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(windows.SHUT_WR) if err != nil { return conn.opErr("closewrite", err) } return nil } // LocalAddr returns the local address of the connection. func (conn *HvsockConn) LocalAddr() net.Addr { return &conn.local } // RemoteAddr returns the remote address of the connection. func (conn *HvsockConn) RemoteAddr() net.Addr { return &conn.remote } // SetDeadline implements the net.Conn SetDeadline method. 
func (conn *HvsockConn) SetDeadline(t time.Time) error {
	// todo: implement `SetDeadline` for `win32File`
	// Until then, set the read and write deadlines independently.
	if err := conn.SetReadDeadline(t); err != nil {
		return fmt.Errorf("set read deadline: %w", err)
	}
	if err := conn.SetWriteDeadline(t); err != nil {
		return fmt.Errorf("set write deadline: %w", err)
	}
	return nil
}

// SetReadDeadline implements the net.Conn SetReadDeadline method.
// Delegates to the underlying win32File.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
	return conn.sock.SetReadDeadline(t)
}

// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
// Delegates to the underlying win32File.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
	return conn.sock.SetWriteDeadline(t)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/fileinfo.go
vendor/github.com/Microsoft/go-winio/fileinfo.go
//go:build windows
// +build windows

package winio

import (
	"os"
	"runtime"
	"unsafe"

	"golang.org/x/sys/windows"
)

// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
// alignment is necessary to pass this as FILE_BASIC_INFO.
type alignedFileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
	FileAttributes                                          uint32
	_                                                       uint32 // padding
}

// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
	bi := &alignedFileBasicInfo{}
	if err := windows.GetFileInformationByHandleEx(
		windows.Handle(f.Fd()),
		windows.FileBasicInfo,
		(*byte)(unsafe.Pointer(bi)),
		uint32(unsafe.Sizeof(*bi)),
	); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f alive so its handle is not finalized while still in use above.
	runtime.KeepAlive(f)
	// Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
	// public API of this module. The data may be unnecessarily aligned.
	return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
}

// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
	// Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
	// suitable to pass to GetFileInformationByHandleEx.
	biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
	if err := windows.SetFileInformationByHandle(
		windows.Handle(f.Fd()),
		windows.FileBasicInfo,
		(*byte)(unsafe.Pointer(&biAligned)),
		uint32(unsafe.Sizeof(biAligned)),
	); err != nil {
		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return nil
}

// FileStandardInfo contains extended information for the file.
// FILE_STANDARD_INFO in WinBase.h
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
type FileStandardInfo struct {
	AllocationSize, EndOfFile int64
	NumberOfLinks             uint32
	DeletePending, Directory  bool
}

// GetFileStandardInfo retrieves extended information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
	si := &FileStandardInfo{}
	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
		windows.FileStandardInfo,
		(*byte)(unsafe.Pointer(si)),
		uint32(unsafe.Sizeof(*si))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f alive so its handle is not finalized while still in use above.
	runtime.KeepAlive(f)
	return si, nil
}

// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system.
type FileIDInfo struct {
	VolumeSerialNumber uint64
	FileID             [16]byte
}

// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
	fileID := &FileIDInfo{}
	if err := windows.GetFileInformationByHandleEx(
		windows.Handle(f.Fd()),
		windows.FileIdInfo,
		(*byte)(unsafe.Pointer(fileID)),
		uint32(unsafe.Sizeof(*fileID)),
	); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	runtime.KeepAlive(f)
	return fileID, nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/reparse.go
vendor/github.com/Microsoft/go-winio/reparse.go
//go:build windows
// +build windows

package winio

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"strings"
	"unicode/utf16"
	"unsafe"
)

// Reparse tags for the two reparse-point kinds this file understands.
const (
	reparseTagMountPoint = 0xA0000003
	reparseTagSymlink    = 0xA000000C
)

// reparseDataBuffer mirrors the fixed-size header of the Win32
// REPARSE_DATA_BUFFER structure (symlink / mount-point flavor).
type reparseDataBuffer struct {
	ReparseTag           uint32
	ReparseDataLength    uint16
	Reserved             uint16
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
}

// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
	Target       string
	IsMountPoint bool
}

// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
// mount point reparse point.
type UnsupportedReparsePointError struct {
	Tag uint32
}

func (e *UnsupportedReparsePointError) Error() string {
	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}

// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
// or a mount point.
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
	// The first 4 bytes hold the reparse tag; the data portion starts at
	// offset 8 (after tag, length, and reserved fields).
	tag := binary.LittleEndian.Uint32(b[0:4])
	return DecodeReparsePointData(tag, b[8:])
}

// DecodeReparsePointData decodes the data portion of a reparse point
// (everything after the 8-byte header) with the given tag.
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
	isMountPoint := false
	switch tag {
	case reparseTagMountPoint:
		isMountPoint = true
	case reparseTagSymlink:
	default:
		return nil, &UnsupportedReparsePointError{tag}
	}
	// b[4:6]/b[6:8] are the print-name offset/length. The path buffer starts
	// after the four uint16 offset/length fields (8 bytes); symlink buffers
	// additionally carry a 4-byte flags field before the paths.
	nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
	if !isMountPoint {
		nameOffset += 4
	}
	nameLength := binary.LittleEndian.Uint16(b[6:8])
	name := make([]uint16, nameLength/2)
	err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
	if err != nil {
		return nil, err
	}
	return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}

// isDriveLetter reports whether c is an ASCII letter (i.e. a plausible
// Windows drive letter).
func isDriveLetter(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
	// Generate an NT path and determine if this is a relative path.
	var ntTarget string
	relative := false
	if strings.HasPrefix(rp.Target, `\\?\`) {
		ntTarget = `\??\` + rp.Target[4:]
	} else if strings.HasPrefix(rp.Target, `\\`) {
		ntTarget = `\??\UNC\` + rp.Target[2:]
	} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
		ntTarget = `\??\` + rp.Target
	} else {
		ntTarget = rp.Target
		relative = true
	}

	// The paths must be NUL-terminated even though they are counted strings.
	target16 := utf16.Encode([]rune(rp.Target + "\x00"))
	ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))

	// ReparseDataLength excludes the 8-byte header (tag + length + reserved),
	// hence the -8; the remainder is the two UTF-16 path buffers.
	size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
	size += len(ntTarget16)*2 + len(target16)*2

	tag := uint32(reparseTagMountPoint)
	if !rp.IsMountPoint {
		tag = reparseTagSymlink
		size += 4 // Add room for symlink flags
	}

	// Name lengths are in bytes and exclude the trailing NUL (hence the -1).
	data := reparseDataBuffer{
		ReparseTag:           tag,
		ReparseDataLength:    uint16(size),
		SubstituteNameOffset: 0,
		SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
		PrintNameOffset:      uint16(len(ntTarget16) * 2),
		PrintNameLength:      uint16((len(target16) - 1) * 2),
	}

	var b bytes.Buffer
	_ = binary.Write(&b, binary.LittleEndian, &data)
	if !rp.IsMountPoint {
		flags := uint32(0)
		if relative {
			flags |= 1 // SYMLINK_FLAG_RELATIVE per REPARSE_DATA_BUFFER docs
		}
		_ = binary.Write(&b, binary.LittleEndian, flags)
	}

	_ = binary.Write(&b, binary.LittleEndian, ntTarget16)
	_ = binary.Write(&b, binary.LittleEndian, target16)
	return b.Bytes()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/syscall.go
vendor/github.com/Microsoft/go-winio/syscall.go
//go:build windows

package winio

// Regenerate the zsyscall_windows.go wrappers for every //sys declaration in
// this package.
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/backup.go
vendor/github.com/Microsoft/go-winio/backup.go
//go:build windows
// +build windows

package winio

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"runtime"
	"unicode/utf16"

	"github.com/Microsoft/go-winio/internal/fs"
	"golang.org/x/sys/windows"
)

// mkwinsyscall directives for the BackupRead/BackupWrite wrappers.
//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite

// Backup stream IDs (WIN32_STREAM_ID stream-id values, starting at 1).
const (
	BackupData = uint32(iota + 1)
	BackupEaData
	BackupSecurity
	BackupAlternateData
	BackupLink
	BackupPropertyData
	BackupObjectId //revive:disable-line:var-naming ID, not Id
	BackupReparseData
	BackupSparseBlock
	BackupTxfsData
)

const (
	StreamSparseAttributes = uint32(8)
)

//nolint:revive // var-naming: ALL_CAPS
const (
	WRITE_DAC              = windows.WRITE_DAC
	WRITE_OWNER            = windows.WRITE_OWNER
	ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
)

// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
	//revive:disable-next-line:var-naming ID, not Id
	Id         uint32 // The backup stream ID
	Attributes uint32 // Stream attributes
	Size       int64  // The size of the stream in bytes
	Name       string // The name of the stream (for BackupAlternateData only).
	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
}

// win32StreamID mirrors the fixed-size prefix of the WIN32_STREAM_ID
// structure as read from/written to a backup stream.
type win32StreamID struct {
	StreamID   uint32
	Attributes uint32
	Size       uint64
	NameSize   uint32
}

// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
	r         io.Reader
	bytesLeft int64 // bytes remaining in the current stream
}

// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
	return &BackupStreamReader{r, 0}
}

// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) { if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this if s, ok := r.r.(io.Seeker); ok { // Make sure Seek on io.SeekCurrent sometimes succeeds // before trying the actual seek. if _, err := s.Seek(0, io.SeekCurrent); err == nil { if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { return nil, err } r.bytesLeft = 0 } } if _, err := io.Copy(io.Discard, r); err != nil { return nil, err } } var wsi win32StreamID if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { return nil, err } hdr := &BackupHeader{ Id: wsi.StreamID, Attributes: wsi.Attributes, Size: int64(wsi.Size), } if wsi.NameSize != 0 { name := make([]uint16, int(wsi.NameSize/2)) if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { return nil, err } hdr.Name = windows.UTF16ToString(name) } if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { return nil, err } hdr.Size -= 8 } r.bytesLeft = hdr.Size return hdr, nil } // Read reads from the current backup stream. func (r *BackupStreamReader) Read(b []byte) (int, error) { if r.bytesLeft == 0 { return 0, io.EOF } if int64(len(b)) > r.bytesLeft { b = b[:r.bytesLeft] } n, err := r.r.Read(b) r.bytesLeft -= int64(n) if err == io.EOF { err = io.ErrUnexpectedEOF } else if r.bytesLeft == 0 && err == nil { err = io.EOF } return n, err } // BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. type BackupStreamWriter struct { w io.Writer bytesLeft int64 } // NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { return &BackupStreamWriter{w, 0} } // WriteHeader writes the next backup stream header and prepares for calls to Write(). 
// WriteHeader writes the stream header for the next backup stream. It fails if
// the byte count declared by the previous header has not been fully written yet.
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
	if w.bytesLeft != 0 {
		return fmt.Errorf("missing %d bytes", w.bytesLeft)
	}
	name := utf16.Encode([]rune(hdr.Name))
	wsi := win32StreamID{
		StreamID:   hdr.Id,
		Attributes: hdr.Attributes,
		Size:       uint64(hdr.Size),
		NameSize:   uint32(len(name) * 2),
	}
	if hdr.Id == BackupSparseBlock {
		// Include space for the int64 block offset
		wsi.Size += 8
	}
	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
		return err
	}
	if len(name) != 0 {
		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
			return err
		}
	}
	if hdr.Id == BackupSparseBlock {
		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
			return err
		}
	}
	// Remember how many payload bytes the caller promised for this stream.
	w.bytesLeft = hdr.Size
	return nil
}

// Write writes to the current backup stream.
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
	if w.bytesLeft < int64(len(b)) {
		return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
	}
	n, err := w.w.Write(b)
	w.bytesLeft -= int64(n)
	return n, err
}

// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr // opaque BackupRead continuation context; 0 when no read is in progress
}

// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
	r := &BackupFileReader{f, includeSecurity, 0}
	return r
}

// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
	var bytesRead uint32
	err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
	if err != nil {
		return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
	}
	// Keep f (and thus its handle) alive until after the syscall has completed.
	runtime.KeepAlive(r.f)
	if bytesRead == 0 {
		return 0, io.EOF
	}
	return int(bytesRead), nil
}

// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
	if r.ctx != 0 {
		// abort=true releases the BackupRead context; result intentionally ignored.
		_ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
		runtime.KeepAlive(r.f)
		r.ctx = 0
	}
	return nil
}

// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr // opaque BackupWrite continuation context; 0 when no write is in progress
}

// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
	w := &BackupFileWriter{f, includeSecurity, 0}
	return w
}

// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
	var bytesWritten uint32
	err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
	if err != nil {
		return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
	}
	runtime.KeepAlive(w.f)
	if int(bytesWritten) != len(b) {
		return int(bytesWritten), errors.New("not all bytes could be written")
	}
	return len(b), nil
}

// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
	if w.ctx != 0 {
		// abort=true releases the BackupWrite context; result intentionally ignored.
		_ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
		runtime.KeepAlive(w.f)
		w.ctx = 0
	}
	return nil
}

// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
// or restore privileges have been acquired.
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
	h, err := fs.CreateFile(path,
		fs.AccessMask(access),
		fs.FileShareMode(share),
		nil,
		fs.FileCreationDisposition(createmode),
		fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
		0,
	)
	if err != nil {
		err = &os.PathError{Op: "open", Path: path, Err: err}
		return nil, err
	}
	return os.NewFile(uintptr(h), path), nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/ea.go
vendor/github.com/Microsoft/go-winio/ea.go
package winio

import (
	"bytes"
	"encoding/binary"
	"errors"
)

// fileFullEaInformation mirrors the fixed-size prefix of the Win32
// FILE_FULL_EA_INFORMATION structure; the name bytes, a null terminator,
// and the value bytes follow it in the buffer.
type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}

var (
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}

// parseEa decodes the first EA entry at the start of b. It returns the decoded
// entry and the remainder of the buffer (nb is nil when this was the final
// entry, i.e. NextEntryOffset was 0).
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return ea, nb, err
	}

	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	// +1 skips the null terminator that follows the name bytes.
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return ea, nb, err
	}

	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	ea.Value = b[valueOffset : valueOffset+valueLen] // NOTE: aliases b, not copied
	ea.Flags = info.Flags
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return ea, nb, err
}

// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { for len(b) != 0 { ea, nb, err := parseEa(b) if err != nil { return nil, err } eas = append(eas, ea) b = nb } return eas, err } func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { if int(uint8(len(ea.Name))) != len(ea.Name) { return errEaNameTooLarge } if int(uint16(len(ea.Value))) != len(ea.Value) { return errEaValueTooLarge } entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) withPadding := (entrySize + 3) &^ 3 nextOffset := uint32(0) if !last { nextOffset = withPadding } info := fileFullEaInformation{ NextEntryOffset: nextOffset, Flags: ea.Flags, NameLength: uint8(len(ea.Name)), ValueLength: uint16(len(ea.Value)), } err := binary.Write(buf, binary.LittleEndian, &info) if err != nil { return err } _, err = buf.Write([]byte(ea.Name)) if err != nil { return err } err = buf.WriteByte(0) if err != nil { return err } _, err = buf.Write(ea.Value) if err != nil { return err } _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) if err != nil { return err } return nil } // EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION // buffer for use with BackupWrite, ZwSetEaFile, etc. func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { var buf bytes.Buffer for i := range eas { last := false if i == len(eas)-1 { last = true } err := writeEa(&buf, &eas[i], last) if err != nil { return nil, err } } return buf.Bytes(), nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/doc.go
vendor/github.com/Microsoft/go-winio/doc.go
// This package provides utilities for efficiently performing Win32 IO operations in Go.
// Currently, this package provides support for general IO and management of
//   - named pipes
//   - files
//   - [Hyper-V sockets]
//
// This code is similar to Go's [net] package, and uses IO completion ports to avoid
// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
//
// This limits support to Windows Vista and newer operating systems.
//
// Additionally, this package provides support for:
//   - creating and managing GUIDs
//   - writing to [ETW]
//   - opening and managing VHDs
//   - parsing [Windows Image files]
//   - auto-generating Win32 API code
//
// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
package winio
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/pipe.go
vendor/github.com/Microsoft/go-winio/pipe.go
//go:build windows
// +build windows

package winio

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"runtime"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"github.com/Microsoft/go-winio/internal/fs"
)

// The //sys lines below are mkwinsyscall directives; do not edit them by hand.
//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error)  [failretval==windows.InvalidHandle] = CreateNamedPipeW
//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl

// PipeConn extends net.Conn with named-pipe-specific Disconnect and Flush
// operations.
type PipeConn interface {
	net.Conn
	Disconnect() error
	Flush() error
}

// type aliases for mkwinsyscall code
type (
	ntAccessMask              = fs.AccessMask
	ntFileShareMode           = fs.FileShareMode
	ntFileCreationDisposition = fs.NTFileCreationDisposition
	ntFileOptions             = fs.NTCreateOptions
)
// ioStatusBlock mirrors the NT IO_STATUS_BLOCK structure.
type ioStatusBlock struct {
	Status, Information uintptr
}

//	typedef struct _OBJECT_ATTRIBUTES {
//	  ULONG Length;
//	  HANDLE RootDirectory;
//	  PUNICODE_STRING ObjectName;
//	  ULONG Attributes;
//	  PVOID SecurityDescriptor;
//	  PVOID SecurityQualityOfService;
//	} OBJECT_ATTRIBUTES;
//
// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
type objectAttributes struct {
	Length             uintptr
	RootDirectory      uintptr
	ObjectName         *unicodeString
	Attributes         uintptr
	SecurityDescriptor *securityDescriptor
	SecurityQoS        uintptr
}

// unicodeString mirrors the NT UNICODE_STRING structure.
type unicodeString struct {
	Length        uint16
	MaximumLength uint16
	Buffer        uintptr
}

//	typedef struct _SECURITY_DESCRIPTOR {
//	  BYTE                        Revision;
//	  BYTE                        Sbz1;
//	  SECURITY_DESCRIPTOR_CONTROL Control;
//	  PSID                        Owner;
//	  PSID                        Group;
//	  PACL                        Sacl;
//	  PACL                        Dacl;
//	} SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
//
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
type securityDescriptor struct {
	Revision byte
	Sbz1     byte
	Control  uint16
	Owner    uintptr
	Group    uintptr
	Sacl     uintptr //revive:disable-line:var-naming SACL, not Sacl
	Dacl     uintptr //revive:disable-line:var-naming DACL, not Dacl
}

// ntStatus is an NTSTATUS code; negative values indicate failure.
type ntStatus int32

// Err converts the NTSTATUS into a Go error (nil on success).
func (status ntStatus) Err() error {
	if status >= 0 {
		return nil
	}
	return rtlNtStatusToDosError(status)
}

var (
	// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
	ErrPipeListenerClosed = net.ErrClosed

	errPipeWriteClosed = errors.New("pipe has been closed for write")
)

// win32Pipe is a byte-mode named pipe connection.
type win32Pipe struct {
	*win32File
	path string
}

var _ PipeConn = (*win32Pipe)(nil)

// win32MessageBytePipe presents a message-mode pipe as a byte stream, and
// simulates CloseWrite with a zero-byte message.
type win32MessageBytePipe struct {
	win32Pipe
	writeClosed bool
	readEOF     bool
}

type pipeAddress string

func (f *win32Pipe) LocalAddr() net.Addr {
	return pipeAddress(f.path)
}

func (f *win32Pipe) RemoteAddr() net.Addr {
	return pipeAddress(f.path)
}

func (f *win32Pipe) SetDeadline(t time.Time) error {
	if err := f.SetReadDeadline(t); err != nil {
		return err
	}
	return f.SetWriteDeadline(t)
}

// Disconnect disconnects the server end of the pipe from the client.
func (f *win32Pipe) Disconnect() error {
	return disconnectNamedPipe(f.win32File.handle)
}

// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
	if f.writeClosed {
		return errPipeWriteClosed
	}
	err := f.win32File.Flush()
	if err != nil {
		return err
	}
	// A zero-byte write signals end-of-stream to the peer (see Read below).
	_, err = f.win32File.Write(nil)
	if err != nil {
		return err
	}
	f.writeClosed = true
	return nil
}

// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
	if f.writeClosed {
		return 0, errPipeWriteClosed
	}
	if len(b) == 0 {
		return 0, nil
	}
	return f.win32File.Write(b)
}

// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
	if f.readEOF {
		return 0, io.EOF
	}
	n, err := f.win32File.Read(b)
	if err == io.EOF { //nolint:errorlint
		// If this was the result of a zero-byte read, then
		// it is possible that the read was due to a zero-size
		// message. Since we are simulating CloseWrite with a
		// zero-byte message, ensure that all future Read() calls
		// also return EOF.
		f.readEOF = true
	} else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
		// and the message still has more bytes. Treat this as a success, since
		// this package presents all named pipes as byte streams.
		err = nil
	}
	return n, err
}

func (pipeAddress) Network() string {
	return "pipe"
}

func (s pipeAddress) String() string {
	return string(s)
}

// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
	for {
		select {
		case <-ctx.Done():
			return windows.Handle(0), ctx.Err()
		default:
			h, err := fs.CreateFile(*path,
				access,
				0,   // mode
				nil, // security attributes
				fs.OPEN_EXISTING,
				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
				0, // template file handle
			)
			if err == nil {
				return h, nil
			}
			if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
				return h, &os.PathError{Err: err, Op: "open", Path: *path}
			}
			// Wait 10 msec and try again. This is a rather simplistic
			// view, as we always try each 10 milliseconds.
			time.Sleep(10 * time.Millisecond)
		}
	}
}

// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds.  (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
	var absTimeout time.Time
	if timeout != nil {
		absTimeout = time.Now().Add(*timeout)
	} else {
		absTimeout = time.Now().Add(2 * time.Second)
	}
	ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
	defer cancel()
	conn, err := DialPipeContext(ctx, path)
	if errors.Is(err, context.DeadlineExceeded) {
		return nil, ErrTimeout
	}
	return conn, err
}

// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) } // PipeImpLevel is an enumeration of impersonation levels that may be set // when calling DialPipeAccessImpersonation. type PipeImpLevel uint32 const ( PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) ) // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` // cancellation or timeout. func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) } // DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with // `access` at `impLevel` until `ctx` cancellation or timeout. The other // DialPipe* implementations use PipeImpLevelAnonymous. func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { var err error var h windows.Handle h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) if err != nil { return nil, err } var flags uint32 err = getNamedPipeInfo(h, &flags, nil, nil, nil) if err != nil { return nil, err } f, err := makeWin32File(h) if err != nil { windows.Close(h) return nil, err } // If the pipe is in message mode, return a message byte pipe, which // supports CloseWrite(). 
if flags&windows.PIPE_TYPE_MESSAGE != 0 { return &win32MessageBytePipe{ win32Pipe: win32Pipe{win32File: f, path: path}, }, nil } return &win32Pipe{win32File: f, path: path}, nil } type acceptResponse struct { f *win32File err error } type win32PipeListener struct { firstHandle windows.Handle path string config PipeConfig acceptCh chan (chan acceptResponse) closeCh chan int doneCh chan int } func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { path16, err := windows.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } var oa objectAttributes oa.Length = unsafe.Sizeof(oa) var ntPath unicodeString if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0, ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck oa.ObjectName = &ntPath oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. if first { if sd != nil { //todo: does `sdb` need to be allocated on the heap, or can go allocate it? l := uint32(len(sd)) sdb, err := windows.LocalAlloc(0, l) if err != nil { return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) } defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) } else { // Construct the default named pipe security descriptor. 
var dacl uintptr if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck sdb := &securityDescriptor{ Revision: 1, Control: windows.SE_DACL_PRESENT, Dacl: dacl, } oa.SecurityDescriptor = sdb } } typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { typ |= windows.FILE_PIPE_MESSAGE_TYPE } disposition := fs.FILE_OPEN access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE if first { disposition = fs.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. access = fs.SYNCHRONIZE } timeout := int64(-50 * 10000) // 50ms var ( h windows.Handle iosb ioStatusBlock ) err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } runtime.KeepAlive(ntPath) return h, nil } func (l *win32PipeListener) makeServerPipe() (*win32File, error) { h, err := makeServerPipeHandle(l.path, nil, &l.config, false) if err != nil { return nil, err } f, err := makeWin32File(h) if err != nil { windows.Close(h) return nil, err } return f, nil } func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { p, err := l.makeServerPipe() if err != nil { return nil, err } // Wait for the client to connect. ch := make(chan error) go func(p *win32File) { ch <- connectPipe(p) }(p) select { case err = <-ch: if err != nil { p.Close() p = nil } case <-l.closeCh: // Abort the connect request by closing the handle. 
p.Close() p = nil err = <-ch if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno err = ErrPipeListenerClosed } } return p, err } func (l *win32PipeListener) listenerRoutine() { closed := false for !closed { select { case <-l.closeCh: closed = true case responseCh := <-l.acceptCh: var ( p *win32File err error ) for { p, err = l.makeConnectedServerPipe() // If the connection was immediately closed by the client, try // again. if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno break } } responseCh <- acceptResponse{p, err} closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } windows.Close(l.firstHandle) l.firstHandle = 0 // Notify Close() and Accept() callers that the handle has been closed. close(l.doneCh) } // PipeConfig contain configuration for the pipe listener. type PipeConfig struct { // SecurityDescriptor contains a Windows security descriptor in SDDL format. SecurityDescriptor string // MessageMode determines whether the pipe is in byte or message mode. In either // case the pipe is read in byte mode by default. The only practical difference in // this implementation is that CloseWrite() is only supported for message mode pipes; // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only // transferred to the reader (and returned as io.EOF in this implementation) // when the pipe is in message mode. MessageMode bool // InputBufferSize specifies the size of the input buffer, in bytes. InputBufferSize int32 // OutputBufferSize specifies the size of the output buffer, in bytes. OutputBufferSize int32 } // ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. // The pipe must not already exist. 
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
	var (
		sd  []byte
		err error
	)
	if c == nil {
		c = &PipeConfig{}
	}
	if c.SecurityDescriptor != "" {
		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
		if err != nil {
			return nil, err
		}
	}
	// Create the first pipe instance up front so creation errors surface here.
	h, err := makeServerPipeHandle(path, sd, c, true)
	if err != nil {
		return nil, err
	}
	l := &win32PipeListener{
		firstHandle: h,
		path:        path,
		config:      *c,
		acceptCh:    make(chan (chan acceptResponse)),
		closeCh:     make(chan int),
		doneCh:      make(chan int),
	}
	go l.listenerRoutine()
	return l, nil
}

// connectPipe waits (via overlapped IO) for a client to connect to the
// server pipe instance p.
func connectPipe(p *win32File) error {
	c, err := p.prepareIO()
	if err != nil {
		return err
	}
	defer p.wg.Done()

	err = connectNamedPipe(p.handle, &c.o)
	_, err = p.asyncIO(c, nil, 0, err)
	if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
		return err
	}
	return nil
}

// Accept waits for and returns the next client connection, or
// ErrPipeListenerClosed once the listener has been closed.
func (l *win32PipeListener) Accept() (net.Conn, error) {
	ch := make(chan acceptResponse)
	select {
	case l.acceptCh <- ch:
		response := <-ch
		err := response.err
		if err != nil {
			return nil, err
		}
		if l.config.MessageMode {
			return &win32MessageBytePipe{
				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
			}, nil
		}
		return &win32Pipe{win32File: response.f, path: l.path}, nil
	case <-l.doneCh:
		return nil, ErrPipeListenerClosed
	}
}

// Close shuts down the listener; it is safe to call multiple times.
func (l *win32PipeListener) Close() error {
	select {
	case l.closeCh <- 1:
		// listenerRoutine received the request; wait for it to finish.
		<-l.doneCh
	case <-l.doneCh:
		// Already closed.
	}
	return nil
}

func (l *win32PipeListener) Addr() net.Addr {
	return pipeAddress(l.path)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
//go:build !windows
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
	// Field layout matches the Windows GUID struct; encoding helpers live in guid.go.
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
//go:build windows
// +build windows

package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.

package guid

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[VariantUnknown-0]
	_ = x[VariantNCS-1]
	_ = x[VariantRFC4122-2]
	_ = x[VariantMicrosoft-3]
	_ = x[VariantFuture-4]
}

// _Variant_name is the concatenated names; _Variant_index holds the slice
// boundaries for each Variant value within it.
const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"

var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}

func (i Variant) String() string {
	if i >= Variant(len(_Variant_index)-1) {
		return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid

import (
	"crypto/rand"
	"crypto/sha1" //nolint:gosec // not used for secure application
	"encoding"
	"encoding/binary"
	"fmt"
	"strconv"
)

//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment

// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8

// The variants specified by RFC 4122 section 4.1.1.
const (
	// VariantUnknown specifies a GUID variant which does not conform to one of
	// the variant encodings specified in RFC 4122.
	VariantUnknown Variant = iota
	VariantNCS
	VariantRFC4122 // RFC 4122
	VariantMicrosoft
	VariantFuture
)

// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8

// String renders the version as its decimal value.
func (v Version) String() string {
	return strconv.FormatUint(uint64(v), 10)
}

// Compile-time checks that GUID implements the text (un)marshaler interfaces.
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return GUID{}, err
	}

	g := FromArray(b)
	g.setVersion(4) // Version 4 means randomly generated.
	g.setVariant(VariantRFC4122)

	return g, nil
}

// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
	b := sha1.New() //nolint:gosec // not used for secure application
	namespaceBytes := namespace.ToArray()
	b.Write(namespaceBytes[:])
	b.Write(name)

	a := [16]byte{}
	copy(a[:], b.Sum(nil))

	g := FromArray(a)
	g.setVersion(5) // Version 5 means generated from a string.
	g.setVariant(VariantRFC4122)

	return g, nil
}

// fromArray decodes 16 bytes into a GUID using the given byte order for the
// first three (multi-byte) fields; Data4 is copied as-is.
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
	var g GUID
	g.Data1 = order.Uint32(b[0:4])
	g.Data2 = order.Uint16(b[4:6])
	g.Data3 = order.Uint16(b[6:8])
	copy(g.Data4[:], b[8:16])
	return g
}

// toArray is the inverse of fromArray.
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
	b := [16]byte{}
	order.PutUint32(b[0:4], g.Data1)
	order.PutUint16(b[4:6], g.Data2)
	order.PutUint16(b[6:8], g.Data3)
	copy(b[8:16], g.Data4[:])
	return b
}

// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
	return fromArray(b, binary.BigEndian)
}

// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
	return g.toArray(binary.BigEndian)
}

// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
	return fromArray(b, binary.LittleEndian)
}

// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
	return g.toArray(binary.LittleEndian)
}

// String renders the GUID in the canonical 8-4-4-4-12 lowercase hex form.
func (g GUID) String() string {
	return fmt.Sprintf(
		"%08x-%04x-%04x-%04x-%012x",
		g.Data1,
		g.Data2,
		g.Data3,
		g.Data4[:2],
		g.Data4[2:])
}

// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) { if len(s) != 36 { return GUID{}, fmt.Errorf("invalid GUID %q", s) } if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { return GUID{}, fmt.Errorf("invalid GUID %q", s) } var g GUID data1, err := strconv.ParseUint(s[0:8], 16, 32) if err != nil { return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data1 = uint32(data1) data2, err := strconv.ParseUint(s[9:13], 16, 16) if err != nil { return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data2 = uint16(data2) data3, err := strconv.ParseUint(s[14:18], 16, 16) if err != nil { return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data3 = uint16(data3) for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { v, err := strconv.ParseUint(s[x:x+2], 16, 8) if err != nil { return GUID{}, fmt.Errorf("invalid GUID %q", s) } g.Data4[i] = uint8(v) } return g, nil } func (g *GUID) setVariant(v Variant) { d := g.Data4[0] switch v { case VariantNCS: d = (d & 0x7f) case VariantRFC4122: d = (d & 0x3f) | 0x80 case VariantMicrosoft: d = (d & 0x1f) | 0xc0 case VariantFuture: d = (d & 0x0f) | 0xe0 case VariantUnknown: fallthrough default: panic(fmt.Sprintf("invalid variant: %d", v)) } g.Data4[0] = d } // Variant returns the GUID variant, as defined in RFC 4122. func (g GUID) Variant() Variant { b := g.Data4[0] if b&0x80 == 0 { return VariantNCS } else if b&0xc0 == 0x80 { return VariantRFC4122 } else if b&0xe0 == 0xc0 { return VariantMicrosoft } else if b&0xe0 == 0xe0 { return VariantFuture } return VariantUnknown } func (g *GUID) setVersion(v Version) { g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) } // Version returns the GUID version, as defined in RFC 4122. func (g GUID) Version() Version { return Version((g.Data3 & 0xF000) >> 12) } // MarshalText returns the textual representation of the GUID. func (g GUID) MarshalText() ([]byte, error) { return []byte(g.String()), nil } // UnmarshalText takes the textual representation of a GUID, and unmarhals it // into this GUID. 
func (g *GUID) UnmarshalText(text []byte) error { g2, err := FromString(string(text)) if err != nil { return err } *g = g2 return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
package stringbuffer

import (
	"sync"
	"unicode/utf16"
)

// TODO: worth exporting and using in mkwinsyscall?

// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
// large path strings:
// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
const MinWStringCap = 310

// use *[]uint16 since []uint16 creates an extra allocation where the slice header
// is copied to heap and then referenced via pointer in the interface header that sync.Pool
// stores.
var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
	New: func() interface{} {
		b := make([]uint16, MinWStringCap)
		return &b
	},
}

// newBuffer fetches a pooled []uint16 with capacity of at least MinWStringCap.
func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }

// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
// This avoids taking a pointer to the slice header in WString, which can be set to nil.
func freeBuffer(b []uint16) { pathPool.Put(&b) }

// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
// for interacting with Win32 APIs.
// Sizes are specified as uint32 and not int.
//
// It is not thread safe.
type WString struct {
	// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.

	// raw buffer
	b []uint16
}

// NewWString returns a [WString] allocated from a shared pool with an
// initial capacity of at least [MinWStringCap].
// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
//
// The buffer should be freed via [WString.Free]
func NewWString() *WString {
	return &WString{
		b: newBuffer(),
	}
}

// Free returns the underlying buffer to the shared pool. After Free, the
// WString's capacity is 0 and Buffer/Pointer return nil.
func (b *WString) Free() {
	if b.empty() {
		return
	}
	freeBuffer(b.b)
	b.b = nil
}

// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
// previous buffer back into pool.
func (b *WString) ResizeTo(c uint32) uint32 { // already sufficient (or n is 0) if c <= b.Cap() { return b.Cap() } if c <= MinWStringCap { c = MinWStringCap } // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places if c <= 2*b.Cap() { c = 2 * b.Cap() } b2 := make([]uint16, c) if !b.empty() { copy(b2, b.b) freeBuffer(b.b) } b.b = b2 return c } // Buffer returns the underlying []uint16 buffer. func (b *WString) Buffer() []uint16 { if b.empty() { return nil } return b.b } // Pointer returns a pointer to the first uint16 in the buffer. // If the [WString.Free] has already been called, the pointer will be nil. func (b *WString) Pointer() *uint16 { if b.empty() { return nil } return &b.b[0] } // String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer. // // It assumes that the data is null-terminated. func (b *WString) String() string { // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows" // and would make this code Windows-only, which makes no sense. // So copy UTF16ToString code into here. // If other windows-specific code is added, switch to [windows.UTF16ToString] s := b.b for i, v := range s { if v == 0 { s = s[:i] break } } return string(utf16.Decode(s)) } // Cap returns the underlying buffer capacity. func (b *WString) Cap() uint32 { if b.empty() { return 0 } return b.cap() } func (b *WString) cap() uint32 { return uint32(cap(b.b)) } func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
package socket import ( "unsafe" ) // RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The // struct must meet the Win32 sockaddr requirements specified here: // https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 // // Specifically, the struct size must be least larger than an int16 (unsigned short) // for the address family. type RawSockaddr interface { // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing // for the RawSockaddr's data to be overwritten by syscalls (if necessary). // // It is the callers responsibility to validate that the values are valid; invalid // pointers or size can cause a panic. Sockaddr() (unsafe.Pointer, int32, error) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
//go:build windows // Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package socket import ( "syscall" "unsafe" "golang.org/x/sys/windows" ) var _ unsafe.Pointer // Do the interface allocations only once for common // Errno values. const ( errnoERROR_IO_PENDING = 997 ) var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. func errnoErr(e syscall.Errno) error { switch e { case 0: return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } return e } var ( modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") procbind = modws2_32.NewProc("bind") procgetpeername = modws2_32.NewProc("getpeername") procgetsockname = modws2_32.NewProc("getsockname") ) func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { err = errnoErr(e1) } return } func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } return } func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } return }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
//go:build windows package socket import ( "errors" "fmt" "net" "sync" "syscall" "unsafe" "github.com/Microsoft/go-winio/pkg/guid" "golang.org/x/sys/windows" ) //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go //sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname //sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername //sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind const socketError = uintptr(^uint32(0)) var ( // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? ErrBufferSize = errors.New("buffer size") ErrAddrFamily = errors.New("address family") ErrInvalidPointer = errors.New("invalid pointer") ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) ) // todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) // GetSockName writes the local address of socket s to the [RawSockaddr] rsa. // If rsa is not large enough, the [windows.WSAEFAULT] is returned. func GetSockName(s windows.Handle, rsa RawSockaddr) error { ptr, l, err := rsa.Sockaddr() if err != nil { return fmt.Errorf("could not retrieve socket pointer and size: %w", err) } // although getsockname returns WSAEFAULT if the buffer is too small, it does not set // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy return getsockname(s, ptr, &l) } // GetPeerName returns the remote address the socket is connected to. // // See [GetSockName] for more information. 
func GetPeerName(s windows.Handle, rsa RawSockaddr) error { ptr, l, err := rsa.Sockaddr() if err != nil { return fmt.Errorf("could not retrieve socket pointer and size: %w", err) } return getpeername(s, ptr, &l) } func Bind(s windows.Handle, rsa RawSockaddr) (err error) { ptr, l, err := rsa.Sockaddr() if err != nil { return fmt.Errorf("could not retrieve socket pointer and size: %w", err) } return bind(s, ptr, l) } // "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the // their sockaddr interface, so they cannot be used with HvsockAddr // Replicate functionality here from // https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go // The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at // runtime via a WSAIoctl call: // https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks type runtimeFunc struct { id guid.GUID once sync.Once addr uintptr err error } func (f *runtimeFunc) Load() error { f.once.Do(func() { var s windows.Handle s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) if f.err != nil { return } defer windows.CloseHandle(s) //nolint:errcheck var n uint32 f.err = windows.WSAIoctl(s, windows.SIO_GET_EXTENSION_FUNCTION_POINTER, (*byte)(unsafe.Pointer(&f.id)), uint32(unsafe.Sizeof(f.id)), (*byte)(unsafe.Pointer(&f.addr)), uint32(unsafe.Sizeof(f.addr)), &n, nil, // overlapped 0, // completionRoutine ) }) return f.err } var ( // todo: add `AcceptEx` and `GetAcceptExSockaddrs` WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS Data1: 0x25a207b9, Data2: 0xddf3, Data3: 0x4660, Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, } connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} ) func ConnectEx( fd windows.Handle, rsa RawSockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *windows.Overlapped, ) error { if err := connectExFunc.Load(); err != nil { 
return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) } ptr, n, err := rsa.Sockaddr() if err != nil { return err } return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) } // BOOL LpfnConnectex( // [in] SOCKET s, // [in] const sockaddr *name, // [in] int namelen, // [in, optional] PVOID lpSendBuffer, // [in] DWORD dwSendDataLength, // [out] LPDWORD lpdwBytesSent, // [in] LPOVERLAPPED lpOverlapped // ) func connectEx( s windows.Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *windows.Overlapped, ) (err error) { r1, _, e1 := syscall.SyscallN(connectExFunc.addr, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), ) if r1 == 0 { if e1 != 0 { err = error(e1) } else { err = syscall.EINVAL } } return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
//go:build windows // Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package fs import ( "syscall" "unsafe" "golang.org/x/sys/windows" ) var _ unsafe.Pointer // Do the interface allocations only once for common // Errno values. const ( errnoERROR_IO_PENDING = 997 ) var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent // allocations at runtime. func errnoErr(e syscall.Errno) error { switch e { case 0: return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } return e } var ( modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procCreateFileW = modkernel32.NewProc("CreateFileW") ) func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { return } return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) } func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = windows.Handle(r0) if handle == windows.InvalidHandle { err = errnoErr(e1) } return }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
vendor/github.com/Microsoft/go-winio/internal/fs/security.go
package fs // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32` // Impersonation levels const ( SecurityAnonymous SecurityImpersonationLevel = 0 SecurityIdentification SecurityImpersonationLevel = 1 SecurityImpersonation SecurityImpersonationLevel = 2 SecurityDelegation SecurityImpersonationLevel = 3 )
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
// This package contains Win32 filesystem functionality. package fs
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
//go:build windows package fs import ( "golang.org/x/sys/windows" "github.com/Microsoft/go-winio/internal/stringbuffer" ) //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew //sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW const NullHandle windows.Handle = 0 // AccessMask defines standard, specific, and generic rights. // // Used with CreateFile and NtCreateFile (and co.). // // Bitmask: // 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 // 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 // +---------------+---------------+-------------------------------+ // |G|G|G|G|Resvd|A| StandardRights| SpecificRights | // |R|W|E|A| |S| | | // +-+-------------+---------------+-------------------------------+ // // GR Generic Read // GW Generic Write // GE Generic Exectue // GA Generic All // Resvd Reserved // AS Access Security System // // https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask // // https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights // // https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants type AccessMask = windows.ACCESS_MASK //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. const ( // Not actually any. 
// // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device" // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters FILE_ANY_ACCESS AccessMask = 0 GENERIC_READ AccessMask = 0x8000_0000 GENERIC_WRITE AccessMask = 0x4000_0000 GENERIC_EXECUTE AccessMask = 0x2000_0000 GENERIC_ALL AccessMask = 0x1000_0000 ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 // Specific Object Access // from ntioapi.h FILE_READ_DATA AccessMask = (0x0001) // file & pipe FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe FILE_ADD_FILE AccessMask = (0x0002) // directory FILE_APPEND_DATA AccessMask = (0x0004) // file FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe FILE_READ_EA AccessMask = (0x0008) // file & directory FILE_READ_PROPERTIES AccessMask = FILE_READ_EA FILE_WRITE_EA AccessMask = (0x0010) // file & directory FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA FILE_EXECUTE AccessMask = (0x0020) // file FILE_TRAVERSE AccessMask = (0x0020) // directory FILE_DELETE_CHILD AccessMask = (0x0040) // directory FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF) FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE) FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE) FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE) SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF // Standard Access // from ntseapi.h DELETE AccessMask = 0x0001_0000 READ_CONTROL AccessMask = 0x0002_0000 WRITE_DAC AccessMask = 0x0004_0000 WRITE_OWNER 
AccessMask = 0x0008_0000 SYNCHRONIZE AccessMask = 0x0010_0000 STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000 STANDARD_RIGHTS_READ AccessMask = READ_CONTROL STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000 ) type FileShareMode uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. const ( FILE_SHARE_NONE FileShareMode = 0x00 FILE_SHARE_READ FileShareMode = 0x01 FILE_SHARE_WRITE FileShareMode = 0x02 FILE_SHARE_DELETE FileShareMode = 0x04 FILE_SHARE_VALID_FLAGS FileShareMode = 0x07 ) type FileCreationDisposition uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. const ( // from winbase.h CREATE_NEW FileCreationDisposition = 0x01 CREATE_ALWAYS FileCreationDisposition = 0x02 OPEN_EXISTING FileCreationDisposition = 0x03 OPEN_ALWAYS FileCreationDisposition = 0x04 TRUNCATE_EXISTING FileCreationDisposition = 0x05 ) // Create disposition values for NtCreate* type NTFileCreationDisposition uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. const ( // From ntioapi.h FILE_SUPERSEDE NTFileCreationDisposition = 0x00 FILE_OPEN NTFileCreationDisposition = 0x01 FILE_CREATE NTFileCreationDisposition = 0x02 FILE_OPEN_IF NTFileCreationDisposition = 0x03 FILE_OVERWRITE NTFileCreationDisposition = 0x04 FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 ) // CreateFile and co. take flags or attributes together as one parameter. // Define alias until we can use generics to allow both // // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants type FileFlagOrAttribute uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
const ( // from winnt.h FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000 FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000 FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000 FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000 FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000 FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000 FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000 FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 ) // NtCreate* functions take a dedicated CreateOptions parameter. // // https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile // // https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file type NTCreateOptions uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
const ( // From ntioapi.h FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 ) type FileSQSFlag = FileFlagOrAttribute //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. const ( // from winbase.h SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 ) // GetFinalPathNameByHandle flags // // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters type GetFinalPathFlag uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
const ( GetFinalPathDefaultFlag GetFinalPathFlag = 0x0 FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0 FILE_NAME_OPENED GetFinalPathFlag = 0x8 VOLUME_NAME_DOS GetFinalPathFlag = 0x0 VOLUME_NAME_GUID GetFinalPathFlag = 0x1 VOLUME_NAME_NT GetFinalPathFlag = 0x2 VOLUME_NAME_NONE GetFinalPathFlag = 0x4 ) // getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle // with the given handle and flags. It transparently takes care of creating a buffer of the // correct size for the call. // // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) { b := stringbuffer.NewWString() //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n? for { n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags)) if err != nil { return "", err } // If the buffer wasn't large enough, n will be the total size needed (including null terminator). // Resize and try again. if n > b.Cap() { b.ResizeTo(n) continue } // If the buffer is large enough, n will be the size not including the null terminator. // Convert to a Go string and return. return b.String(), nil } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/sort.go
vendor/github.com/lucasb-eyer/go-colorful/sort.go
// This file provides functions for sorting colors. package colorful import ( "math" "sort" ) // An element represents a single element of a set. It is used to // implement a disjoint-set forest. type element struct { parent *element // Parent element rank int // Rank (approximate depth) of the subtree with this element as root } // newElement creates a singleton set and returns its sole element. func newElement() *element { s := &element{} s.parent = s return s } // find returns an arbitrary element of a set when invoked on any element of // the set, The important feature is that it returns the same value when // invoked on any element of the set. Consequently, it can be used to test if // two elements belong to the same set. func (e *element) find() *element { for e.parent != e { e.parent = e.parent.parent e = e.parent } return e } // union establishes the union of two sets when given an element from each set. // Afterwards, the original sets no longer exist as separate entities. func union(e1, e2 *element) { // Ensure the two elements aren't already part of the same union. e1Root := e1.find() e2Root := e2.find() if e1Root == e2Root { return } // Create a union by making the shorter tree point to the root of the // larger tree. switch { case e1Root.rank < e2Root.rank: e1Root.parent = e2Root case e1Root.rank > e2Root.rank: e2Root.parent = e1Root default: e2Root.parent = e1Root e1Root.rank++ } } // An edgeIdxs describes an edge in a graph or tree. The vertices in the edge // are indexes into a list of Color values. type edgeIdxs [2]int // An edgeDistance is a map from an edge (pair of indices) to a distance // between the two vertices. type edgeDistance map[edgeIdxs]float64 // allToAllDistancesCIEDE2000 computes the CIEDE2000 distance between each pair of // colors. It returns a map from a pair of indices (u, v) with u < v to a // distance. 
func allToAllDistancesCIEDE2000(cs []Color) edgeDistance { nc := len(cs) m := make(edgeDistance, nc*nc) for u := 0; u < nc-1; u++ { for v := u + 1; v < nc; v++ { m[edgeIdxs{u, v}] = cs[u].DistanceCIEDE2000(cs[v]) } } return m } // sortEdges sorts all edges in a distance map by increasing vertex distance. func sortEdges(m edgeDistance) []edgeIdxs { es := make([]edgeIdxs, 0, len(m)) for uv := range m { es = append(es, uv) } sort.Slice(es, func(i, j int) bool { return m[es[i]] < m[es[j]] }) return es } // minSpanTree computes a minimum spanning tree from a vertex count and a // distance-sorted edge list. It returns the subset of edges that belong to // the tree, including both (u, v) and (v, u) for each edge. func minSpanTree(nc int, es []edgeIdxs) map[edgeIdxs]struct{} { // Start with each vertex in its own set. elts := make([]*element, nc) for i := range elts { elts[i] = newElement() } // Run Kruskal's algorithm to construct a minimal spanning tree. mst := make(map[edgeIdxs]struct{}, nc) for _, uv := range es { u, v := uv[0], uv[1] if elts[u].find() == elts[v].find() { continue // Same set: edge would introduce a cycle. } mst[uv] = struct{}{} mst[edgeIdxs{v, u}] = struct{}{} union(elts[u], elts[v]) } return mst } // traverseMST walks a minimum spanning tree in prefix order. func traverseMST(mst map[edgeIdxs]struct{}, root int) []int { // Compute a list of neighbors for each vertex. neighs := make(map[int][]int, len(mst)) for uv := range mst { u, v := uv[0], uv[1] neighs[u] = append(neighs[u], v) } for u, vs := range neighs { sort.Ints(vs) copy(neighs[u], vs) } // Walk the tree from a given vertex. order := make([]int, 0, len(neighs)) visited := make(map[int]bool, len(neighs)) var walkFrom func(int) walkFrom = func(r int) { // Visit the starting vertex. order = append(order, r) visited[r] = true // Recursively visit each child in turn. 
for _, c := range neighs[r] { if !visited[c] { walkFrom(c) } } } walkFrom(root) return order } // Sorted sorts a list of Color values. Sorting is not a well-defined operation // for colors so the intention here primarily is to order colors so that the // transition from one to the next is fairly smooth. func Sorted(cs []Color) []Color { // Do nothing in trivial cases. newCs := make([]Color, len(cs)) if len(cs) < 2 { copy(newCs, cs) return newCs } // Compute the distance from each color to every other color. dists := allToAllDistancesCIEDE2000(cs) // Produce a list of edges in increasing order of the distance between // their vertices. edges := sortEdges(dists) // Construct a minimum spanning tree from the list of edges. mst := minSpanTree(len(cs), edges) // Find the darkest color in the list. var black Color var dIdx int // Index of darkest color light := math.MaxFloat64 // Lightness of darkest color (distance from black) for i, c := range cs { d := black.DistanceCIEDE2000(c) if d < light { dIdx = i light = d } } // Traverse the tree starting from the darkest color. idxs := traverseMST(mst, dIdx) // Convert the index list to a list of colors, overwriting the input. for i, idx := range idxs { newCs[i] = cs[idx] } return newCs }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/hsluv.go
vendor/github.com/lucasb-eyer/go-colorful/hsluv.go
package colorful

import "math"

// Source: https://github.com/hsluv/hsluv-go
// Under MIT License
// Modified so that Saturation and Luminance are in [0..1] instead of [0..100].

// HSLuv uses a rounded version of the D65. This has no impact on the final RGB
// values, but to keep high levels of accuracy for internal operations and when
// comparing to the test values, this modified white reference is used internally.
//
// See this GitHub thread for details on these values:
//
// https://github.com/hsluv/hsluv/issues/79
var hSLuvD65 = [3]float64{0.95045592705167, 1.0, 1.089057750759878}

// LuvLChToHSLuv converts LuvLCh coordinates (l and c in [0..1], h in degrees)
// to HSLuv (h in degrees, s and l clamped into [0..1]).
func LuvLChToHSLuv(l, c, h float64) (float64, float64, float64) {
	// [-1..1] but the code expects it to be [-100..100]
	c *= 100.0
	l *= 100.0

	var s, max float64
	// Near-white / near-black lightness: saturation is meaningless, use 0.
	if l > 99.9999999 || l < 0.00000001 {
		s = 0.0
	} else {
		max = maxChromaForLH(l, h)
		s = c / max * 100.0
	}
	return h, clamp01(s / 100.0), clamp01(l / 100.0)
}

// HSLuvToLuvLCh is the inverse of LuvLChToHSLuv.
func HSLuvToLuvLCh(h, s, l float64) (float64, float64, float64) {
	l *= 100.0
	s *= 100.0

	var c, max float64
	if l > 99.9999999 || l < 0.00000001 {
		c = 0.0
	} else {
		max = maxChromaForLH(l, h)
		c = max / 100.0 * s
	}

	// c is [-100..100], but for LCh it's supposed to be almost [-1..1]
	return clamp01(l / 100.0), c / 100.0, h
}

// LuvLChToHPLuv converts LuvLCh to HPLuv; like LuvLChToHSLuv but the chroma
// bound is hue-independent (maxSafeChromaForL), so results are not clamped.
func LuvLChToHPLuv(l, c, h float64) (float64, float64, float64) {
	// [-1..1] but the code expects it to be [-100..100]
	c *= 100.0
	l *= 100.0

	var s, max float64
	if l > 99.9999999 || l < 0.00000001 {
		s = 0.0
	} else {
		max = maxSafeChromaForL(l)
		s = c / max * 100.0
	}
	return h, s / 100.0, l / 100.0
}

// HPLuvToLuvLCh is the inverse of LuvLChToHPLuv.
func HPLuvToLuvLCh(h, s, l float64) (float64, float64, float64) {
	// [-1..1] but the code expects it to be [-100..100]
	l *= 100.0
	s *= 100.0

	var c, max float64
	if l > 99.9999999 || l < 0.00000001 {
		c = 0.0
	} else {
		max = maxSafeChromaForL(l)
		c = max / 100.0 * s
	}
	return l / 100.0, c / 100.0, h
}

// HSLuv creates a new Color from values in the HSLuv color space.
// Hue in [0..360], a Saturation [0..1], and a Luminance (lightness) in [0..1].
//
// The returned color values are clamped (using .Clamped), so this will never output
// an invalid color.
func HSLuv(h, s, l float64) Color {
	// HSLuv -> LuvLCh -> CIELUV -> CIEXYZ -> Linear RGB -> sRGB
	l, u, v := LuvLChToLuv(HSLuvToLuvLCh(h, s, l))
	return LinearRgb(XyzToLinearRgb(LuvToXyzWhiteRef(l, u, v, hSLuvD65))).Clamped()
}

// HPLuv creates a new Color from values in the HPLuv color space.
// Hue in [0..360], a Saturation [0..1], and a Luminance (lightness) in [0..1].
//
// The returned color values are clamped (using .Clamped), so this will never output
// an invalid color.
func HPLuv(h, s, l float64) Color {
	// HPLuv -> LuvLCh -> CIELUV -> CIEXYZ -> Linear RGB -> sRGB
	l, u, v := LuvLChToLuv(HPLuvToLuvLCh(h, s, l))
	return LinearRgb(XyzToLinearRgb(LuvToXyzWhiteRef(l, u, v, hSLuvD65))).Clamped()
}

// HSLuv returns the Hue, Saturation and Luminance of the color in the HSLuv
// color space. Hue in [0..360], a Saturation [0..1], and a Luminance
// (lightness) in [0..1].
func (col Color) HSLuv() (h, s, l float64) {
	// sRGB -> Linear RGB -> CIEXYZ -> CIELUV -> LuvLCh -> HSLuv
	return LuvLChToHSLuv(col.LuvLChWhiteRef(hSLuvD65))
}

// HPLuv returns the Hue, Saturation and Luminance of the color in the HSLuv
// color space. Hue in [0..360], a Saturation [0..1], and a Luminance
// (lightness) in [0..1].
//
// Note that HPLuv can only represent pastel colors, and so the Saturation
// value could be much larger than 1 for colors it can't represent.
func (col Color) HPLuv() (h, s, l float64) {
	return LuvLChToHPLuv(col.LuvLChWhiteRef(hSLuvD65))
}

// DistanceHSLuv calculates Euclidean distance in the HSLuv colorspace. No idea
// how useful this is.
//
// The Hue value is divided by 100 before the calculation, so that H, S, and L
// have the same relative ranges.
func (c1 Color) DistanceHSLuv(c2 Color) float64 {
	h1, s1, l1 := c1.HSLuv()
	h2, s2, l2 := c2.HSLuv()
	return math.Sqrt(sq((h1-h2)/100.0) + sq(s1-s2) + sq(l1-l2))
}

// DistanceHPLuv calculates Euclidean distance in the HPLuv colorspace. No idea
// how useful this is.
//
// The Hue value is divided by 100 before the calculation, so that H, S, and L
// have the same relative ranges.
func (c1 Color) DistanceHPLuv(c2 Color) float64 {
	h1, s1, l1 := c1.HPLuv()
	h2, s2, l2 := c2.HPLuv()
	return math.Sqrt(sq((h1-h2)/100.0) + sq(s1-s2) + sq(l1-l2))
}

// m is the CIE XYZ -> linear sRGB conversion matrix; the coefficients match
// XyzToLinearRgb in colors.go.
var m = [3][3]float64{
	{3.2409699419045214, -1.5373831775700935, -0.49861076029300328},
	{-0.96924363628087983, 1.8759675015077207, 0.041555057407175613},
	{0.055630079696993609, -0.20397695888897657, 1.0569715142428786},
}

// CIE standard constants: kappa = (29/3)^3, epsilon = (6/29)^3.
const kappa = 903.2962962962963
const epsilon = 0.0088564516790356308

// maxChromaForLH returns the largest chroma (on the [0..100] scale) that
// stays inside the sRGB gamut for lightness l and hue h (degrees): the
// shortest positive ray intersection with any of the six gamut bound lines.
func maxChromaForLH(l, h float64) float64 {
	hRad := h / 360.0 * math.Pi * 2.0
	minLength := math.MaxFloat64
	for _, line := range getBounds(l) {
		length := lengthOfRayUntilIntersect(hRad, line[0], line[1])
		if length > 0.0 && length < minLength {
			minLength = length
		}
	}
	return minLength
}

// getBounds computes, for lightness l, the six {slope, intercept} lines that
// bound the sRGB gamut in the chroma plane (two per RGB channel, for the
// channel hitting 0 and hitting 1).
func getBounds(l float64) [6][2]float64 {
	var sub2 float64
	var ret [6][2]float64
	sub1 := math.Pow(l+16.0, 3.0) / 1560896.0
	if sub1 > epsilon {
		sub2 = sub1
	} else {
		sub2 = l / kappa
	}
	for i := range m {
		for k := 0; k < 2; k++ {
			top1 := (284517.0*m[i][0] - 94839.0*m[i][2]) * sub2
			top2 := (838422.0*m[i][2]+769860.0*m[i][1]+731718.0*m[i][0])*l*sub2 - 769860.0*float64(k)*l
			bottom := (632260.0*m[i][2]-126452.0*m[i][1])*sub2 + 126452.0*float64(k)
			ret[i*2+k][0] = top1 / bottom
			ret[i*2+k][1] = top2 / bottom
		}
	}
	return ret
}

// lengthOfRayUntilIntersect returns the distance from the origin, along the
// ray at angle theta, to the line y' = x*x' + y.
func lengthOfRayUntilIntersect(theta, x, y float64) (length float64) {
	length = y / (math.Sin(theta) - x*math.Cos(theta))
	return
}

// maxSafeChromaForL returns the largest chroma that is inside the gamut for
// lightness l regardless of hue (the distance to the closest bound line);
// used by HPLuv.
func maxSafeChromaForL(l float64) float64 {
	minLength := math.MaxFloat64
	for _, line := range getBounds(l) {
		m1 := line[0]
		b1 := line[1]
		// Foot of the perpendicular from the origin onto the bound line.
		x := intersectLineLine(m1, b1, -1.0/m1, 0.0)
		dist := distanceFromPole(x, b1+x*m1)
		if dist < minLength {
			minLength = dist
		}
	}
	return minLength
}

// intersectLineLine returns the x coordinate where the lines
// y = x1*x + y1 and y = x2*x + y2 intersect.
func intersectLineLine(x1, y1, x2, y2 float64) float64 {
	return (y1 - y2) / (x2 - x1)
}

// distanceFromPole is the Euclidean distance of (x, y) from the origin.
func distanceFromPole(x, y float64) float64 {
	return math.Sqrt(math.Pow(x, 2.0) + math.Pow(y, 2.0))
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/colorgens.go
vendor/github.com/lucasb-eyer/go-colorful/colorgens.go
// Various ways to generate single random colors package colorful // Creates a random dark, "warm" color through a restricted HSV space. func FastWarmColorWithRand(rand RandInterface) Color { return Hsv( rand.Float64()*360.0, 0.5+rand.Float64()*0.3, 0.3+rand.Float64()*0.3) } func FastWarmColor() Color { return FastWarmColorWithRand(getDefaultGlobalRand()) } // Creates a random dark, "warm" color through restricted HCL space. // This is slower than FastWarmColor but will likely give you colors which have // the same "warmness" if you run it many times. func WarmColorWithRand(rand RandInterface) (c Color) { for c = randomWarmWithRand(rand); !c.IsValid(); c = randomWarmWithRand(rand) { } return } func WarmColor() (c Color) { return WarmColorWithRand(getDefaultGlobalRand()) } func randomWarmWithRand(rand RandInterface) Color { return Hcl( rand.Float64()*360.0, 0.1+rand.Float64()*0.3, 0.2+rand.Float64()*0.3) } // Creates a random bright, "pimpy" color through a restricted HSV space. func FastHappyColorWithRand(rand RandInterface) Color { return Hsv( rand.Float64()*360.0, 0.7+rand.Float64()*0.3, 0.6+rand.Float64()*0.3) } func FastHappyColor() Color { return FastHappyColorWithRand(getDefaultGlobalRand()) } // Creates a random bright, "pimpy" color through restricted HCL space. // This is slower than FastHappyColor but will likely give you colors which // have the same "brightness" if you run it many times. func HappyColorWithRand(rand RandInterface) (c Color) { for c = randomPimpWithRand(rand); !c.IsValid(); c = randomPimpWithRand(rand) { } return } func HappyColor() (c Color) { return HappyColorWithRand(getDefaultGlobalRand()) } func randomPimpWithRand(rand RandInterface) Color { return Hcl( rand.Float64()*360.0, 0.5+rand.Float64()*0.3, 0.5+rand.Float64()*0.3) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go
vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go
package colorful // Uses the HSV color space to generate colors with similar S,V but distributed // evenly along their Hue. This is fast but not always pretty. // If you've got time to spare, use Lab (the non-fast below). func FastHappyPaletteWithRand(colorsCount int, rand RandInterface) (colors []Color) { colors = make([]Color, colorsCount) for i := 0; i < colorsCount; i++ { colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.8+rand.Float64()*0.2, 0.65+rand.Float64()*0.2) } return } func FastHappyPalette(colorsCount int) (colors []Color) { return FastHappyPaletteWithRand(colorsCount, getDefaultGlobalRand()) } func HappyPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) { pimpy := func(l, a, b float64) bool { _, c, _ := LabToHcl(l, a, b) return 0.3 <= c && 0.4 <= l && l <= 0.8 } return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{pimpy, 50, true}, rand) } func HappyPalette(colorsCount int) ([]Color, error) { return HappyPaletteWithRand(colorsCount, getDefaultGlobalRand()) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go
vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go
// Largely inspired by the descriptions in http://lab.medialab.sciences-po.fr/iwanthue/
// but written from scratch.
package colorful

import (
	"fmt"
	"math"
)

// The algorithm works in L*a*b* color space and converts to RGB in the end.
// L* in [0..1], a* and b* in [-1..1]
type lab_t struct {
	L, A, B float64
}

type SoftPaletteSettings struct {
	// A function which can be used to restrict the allowed color-space.
	CheckColor func(l, a, b float64) bool

	// The higher, the better quality but the slower. Usually two figures.
	Iterations int

	// Use up to 160000 or 8000 samples of the L*a*b* space (and thus calls to CheckColor).
	// Set this to true only if your CheckColor shapes the Lab space weirdly.
	ManySamples bool
}

// Yeah, windows-stype Foo, FooEx, screw you golang...
// Uses K-means to cluster the color-space and return the means of the clusters
// as a new palette of distinctive colors. Falls back to K-medoid if the mean
// happens to fall outside of the color-space, which can only happen if you
// specify a CheckColor function.
func SoftPaletteExWithRand(colorsCount int, settings SoftPaletteSettings, rand RandInterface) ([]Color, error) {

	// Checks whether it's a valid RGB and also fulfills the potentially provided constraint.
	check := func(col lab_t) bool {
		c := Lab(col.L, col.A, col.B)
		return c.IsValid() && (settings.CheckColor == nil || settings.CheckColor(col.L, col.A, col.B))
	}

	// Sample the color space. These will be the points k-means is run on.
	dl := 0.05
	dab := 0.1
	if settings.ManySamples {
		dl = 0.01
		dab = 0.05
	}

	samples := make([]lab_t, 0, int(1.0/dl*2.0/dab*2.0/dab))
	for l := 0.0; l <= 1.0; l += dl {
		for a := -1.0; a <= 1.0; a += dab {
			for b := -1.0; b <= 1.0; b += dab {
				if check(lab_t{l, a, b}) {
					samples = append(samples, lab_t{l, a, b})
				}
			}
		}
	}

	// That would cause some infinite loops down there...
	if len(samples) < colorsCount {
		return nil, fmt.Errorf("palettegen: more colors requested (%v) than samples available (%v). Your requested color count may be wrong, you might want to use many samples or your constraint function makes the valid color space too small", colorsCount, len(samples))
	} else if len(samples) == colorsCount {
		return labs2cols(samples), nil // Oops?
	}

	// We take the initial means out of the samples, so they are in fact medoids.
	// This helps us avoid infinite loops or arbitrary cutoffs with too restrictive constraints.
	means := make([]lab_t, colorsCount)
	for i := 0; i < colorsCount; i++ {
		// Re-draw until this mean is distinct from the ones chosen so far.
		for means[i] = samples[rand.Intn(len(samples))]; in(means, i, means[i]); means[i] = samples[rand.Intn(len(samples))] {
		}
	}

	// clusters[i] is the index of the mean closest to samples[i];
	// samples_used[i] marks samples currently serving as a medoid.
	clusters := make([]int, len(samples))
	samples_used := make([]bool, len(samples))

	// The actual k-means/medoid iterations
	for i := 0; i < settings.Iterations; i++ {

		// Reassigning the samples to clusters, i.e. to their closest mean.
		// By the way, also check if any sample is used as a medoid and if so, mark that.
		for isample, sample := range samples {
			samples_used[isample] = false
			mindist := math.Inf(+1)
			for imean, mean := range means {
				dist := lab_dist(sample, mean)
				if dist < mindist {
					mindist = dist
					clusters[isample] = imean
				}

				// Mark samples which are used as a medoid.
				if lab_eq(sample, mean) {
					samples_used[isample] = true
				}
			}
		}

		// Compute new means according to the samples.
		for imean := range means {
			// The new mean is the average of all samples belonging to it.
			nsamples := 0
			newmean := lab_t{0.0, 0.0, 0.0}
			for isample, sample := range samples {
				if clusters[isample] == imean {
					nsamples++
					newmean.L += sample.L
					newmean.A += sample.A
					newmean.B += sample.B
				}
			}
			if nsamples > 0 {
				newmean.L /= float64(nsamples)
				newmean.A /= float64(nsamples)
				newmean.B /= float64(nsamples)
			} else {
				// That mean doesn't have any samples? Get a new mean from the sample list!
				var inewmean int
				for inewmean = rand.Intn(len(samples_used)); samples_used[inewmean]; inewmean = rand.Intn(len(samples_used)) {
				}
				newmean = samples[inewmean]
				samples_used[inewmean] = true
			}

			// But now we still need to check whether the new mean is an allowed color.
			if nsamples > 0 && check(newmean) {
				// It does, life's good (TM)
				means[imean] = newmean
			} else {
				// New mean isn't an allowed color or doesn't have any samples!
				// Switch to medoid mode and pick the closest (unused) sample.
				// This should always find something thanks to len(samples) >= colorsCount
				mindist := math.Inf(+1)
				for isample, sample := range samples {
					if !samples_used[isample] {
						dist := lab_dist(sample, newmean)
						if dist < mindist {
							mindist = dist
							newmean = sample
						}
					}
				}
			}
		}
	}
	return labs2cols(means), nil
}

// SoftPaletteEx is SoftPaletteExWithRand using the global random source.
func SoftPaletteEx(colorsCount int, settings SoftPaletteSettings) ([]Color, error) {
	return SoftPaletteExWithRand(colorsCount, settings, getDefaultGlobalRand())
}

// A wrapper which uses common parameters.
func SoftPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) {
	return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{nil, 50, false}, rand)
}

// SoftPalette is SoftPaletteWithRand using the global random source.
func SoftPalette(colorsCount int) ([]Color, error) {
	return SoftPaletteWithRand(colorsCount, getDefaultGlobalRand())
}

// in reports whether needle occurs in haystack[:upto].
func in(haystack []lab_t, upto int, needle lab_t) bool {
	for i := 0; i < upto && i < len(haystack); i++ {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}

// LAB_DELTA is the per-channel tolerance used by lab_eq.
const LAB_DELTA = 1e-6

// lab_eq reports whether two Lab points are equal within LAB_DELTA per channel.
func lab_eq(lab1, lab2 lab_t) bool {
	return math.Abs(lab1.L-lab2.L) < LAB_DELTA &&
		math.Abs(lab1.A-lab2.A) < LAB_DELTA &&
		math.Abs(lab1.B-lab2.B) < LAB_DELTA
}

// That's faster than using colorful's DistanceLab since we would have to
// convert back and forth for that. Here is no conversion.
func lab_dist(lab1, lab2 lab_t) float64 {
	return math.Sqrt(sq(lab1.L-lab2.L) + sq(lab1.A-lab2.A) + sq(lab1.B-lab2.B))
}

// labs2cols converts a slice of Lab points into Colors.
func labs2cols(labs []lab_t) (cols []Color) {
	cols = make([]Color, len(labs))
	for k, v := range labs {
		cols[k] = Lab(v.L, v.A, v.B)
	}
	return cols
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/rand.go
vendor/github.com/lucasb-eyer/go-colorful/rand.go
package colorful

import "math/rand"

// RandInterface abstracts the subset of math/rand used by the color and
// palette generators, so callers can plug in their own (e.g. seeded) source.
type RandInterface interface {
	Float64() float64
	Intn(n int) int
}

// defaultGlobalRand adapts math/rand's package-level functions to
// RandInterface.
type defaultGlobalRand struct{}

// Float64 returns a pseudo-random number in [0.0, 1.0) from the global source.
func (defaultGlobalRand) Float64() float64 { return rand.Float64() }

// Intn returns a pseudo-random number in [0, n) from the global source.
func (defaultGlobalRand) Intn(n int) int { return rand.Intn(n) }

// getDefaultGlobalRand returns a RandInterface backed by math/rand's global
// source; used by all the ...Color/...Palette convenience wrappers.
func getDefaultGlobalRand() RandInterface {
	return defaultGlobalRand{}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/colors.go
vendor/github.com/lucasb-eyer/go-colorful/colors.go
// The colorful package provides all kinds of functions for working with colors.
package colorful

import (
	"fmt"
	"image/color"
	"math"
	"strconv"
)

// A color is stored internally using sRGB (standard RGB) values in the range 0-1
type Color struct {
	R, G, B float64
}

// Implement the Go color.Color interface.
func (col Color) RGBA() (r, g, b, a uint32) {
	// Scale [0..1] to [0..65535]; the +0.5 rounds to nearest.
	r = uint32(col.R*65535.0 + 0.5)
	g = uint32(col.G*65535.0 + 0.5)
	b = uint32(col.B*65535.0 + 0.5)
	a = 0xFFFF
	return
}

// Constructs a colorful.Color from something implementing color.Color
// The second return value is false when the input is fully transparent
// (alpha 0), in which case the RGB channels carry no information.
func MakeColor(col color.Color) (Color, bool) {
	r, g, b, a := col.RGBA()
	if a == 0 {
		return Color{0, 0, 0}, false
	}

	// Since color.Color is alpha pre-multiplied, we need to divide the
	// RGB values by alpha again in order to get back the original RGB.
	r *= 0xffff
	r /= a
	g *= 0xffff
	g /= a
	b *= 0xffff
	b /= a

	return Color{float64(r) / 65535.0, float64(g) / 65535.0, float64(b) / 65535.0}, true
}

// Might come in handy sometimes to reduce boilerplate code.
func (col Color) RGB255() (r, g, b uint8) {
	// Rounded 8-bit channel values.
	r = uint8(col.R*255.0 + 0.5)
	g = uint8(col.G*255.0 + 0.5)
	b = uint8(col.B*255.0 + 0.5)
	return
}

// Used to simplify HSLuv testing.
func (col Color) values() (float64, float64, float64) {
	return col.R, col.G, col.B
}

// This is the tolerance used when comparing colors using AlmostEqualRgb.
const Delta = 1.0 / 255.0

// This is the default reference white point.
var D65 = [3]float64{0.95047, 1.00000, 1.08883}

// And another one.
var D50 = [3]float64{0.96422, 1.00000, 0.82521}

// Checks whether the color exists in RGB space, i.e. all values are in [0..1]
func (c Color) IsValid() bool {
	return 0.0 <= c.R && c.R <= 1.0 &&
		0.0 <= c.G && c.G <= 1.0 &&
		0.0 <= c.B && c.B <= 1.0
}

// clamp01 clamps from 0 to 1.
func clamp01(v float64) float64 {
	return math.Max(0.0, math.Min(v, 1.0))
}

// Clamped returns the color with each value clamped to [0..1].
// If the color is valid already, this is a no-op.
func (c Color) Clamped() Color { return Color{clamp01(c.R), clamp01(c.G), clamp01(c.B)} } func sq(v float64) float64 { return v * v } func cub(v float64) float64 { return v * v * v } // DistanceRgb computes the distance between two colors in RGB space. // This is not a good measure! Rather do it in Lab space. func (c1 Color) DistanceRgb(c2 Color) float64 { return math.Sqrt(sq(c1.R-c2.R) + sq(c1.G-c2.G) + sq(c1.B-c2.B)) } // DistanceLinearRgb computes the distance between two colors in linear RGB // space. This is not useful for measuring how humans perceive color, but // might be useful for other things, like dithering. func (c1 Color) DistanceLinearRgb(c2 Color) float64 { r1, g1, b1 := c1.LinearRgb() r2, g2, b2 := c2.LinearRgb() return math.Sqrt(sq(r1-r2) + sq(g1-g2) + sq(b1-b2)) } // DistanceLinearRGB is deprecated in favour of DistanceLinearRgb. // They do the exact same thing. func (c1 Color) DistanceLinearRGB(c2 Color) float64 { return c1.DistanceLinearRgb(c2) } // DistanceRiemersma is a color distance algorithm developed by Thiadmer Riemersma. // It uses RGB coordinates, but he claims it has similar results to CIELUV. // This makes it both fast and accurate. // // Sources: // // https://www.compuphase.com/cmetric.htm // https://github.com/lucasb-eyer/go-colorful/issues/52 func (c1 Color) DistanceRiemersma(c2 Color) float64 { rAvg := (c1.R + c2.R) / 2.0 // Deltas dR := c1.R - c2.R dG := c1.G - c2.G dB := c1.B - c2.B return math.Sqrt((2+rAvg)*dR*dR + 4*dG*dG + (2+(1-rAvg))*dB*dB) } // Check for equality between colors within the tolerance Delta (1/255). func (c1 Color) AlmostEqualRgb(c2 Color) bool { return math.Abs(c1.R-c2.R)+ math.Abs(c1.G-c2.G)+ math.Abs(c1.B-c2.B) < 3.0*Delta } // You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl. 
func (c1 Color) BlendRgb(c2 Color, t float64) Color {
	// Plain per-channel linear interpolation; t == 0 gives c1, t == 1 gives c2.
	return Color{
		c1.R + t*(c2.R-c1.R),
		c1.G + t*(c2.G-c1.G),
		c1.B + t*(c2.B-c1.B),
	}
}

// Utility used by Hxx color-spaces for interpolating between two angles in [0,360].
func interp_angle(a0, a1, t float64) float64 {
	// Based on the answer here: http://stackoverflow.com/a/14498790/2366315
	// With potential proof that it works here: http://math.stackexchange.com/a/2144499
	delta := math.Mod(math.Mod(a1-a0, 360.0)+540, 360.0) - 180.0
	return math.Mod(a0+t*delta+360.0, 360.0)
}

/// HSV ///
///////////

// From http://en.wikipedia.org/wiki/HSL_and_HSV
// Note that h is in [0..359] and s,v in [0..1]

// Hsv returns the Hue [0..359], Saturation and Value [0..1] of the color.
func (col Color) Hsv() (h, s, v float64) {
	min := math.Min(math.Min(col.R, col.G), col.B)
	v = math.Max(math.Max(col.R, col.G), col.B)
	C := v - min

	s = 0.0
	if v != 0.0 {
		s = C / v
	}

	h = 0.0 // We use 0 instead of undefined as in wp.
	if min != v {
		// NOTE: deliberately sequential `if`s, not else-ifs — when two
		// channels tie for the maximum, the last matching branch wins.
		if v == col.R {
			h = math.Mod((col.G-col.B)/C, 6.0)
		}
		if v == col.G {
			h = (col.B-col.R)/C + 2.0
		}
		if v == col.B {
			h = (col.R-col.G)/C + 4.0
		}
		h *= 60.0
		if h < 0.0 {
			h += 360.0
		}
	}
	return
}

// Hsv creates a new Color given a Hue in [0..359], a Saturation and a Value in [0..1]
func Hsv(H, S, V float64) Color {
	// Standard HSV -> RGB: Hp selects the 60-degree hue sector, C is the
	// chroma, X the second-largest component, m the per-channel offset.
	Hp := H / 60.0
	C := V * S
	X := C * (1.0 - math.Abs(math.Mod(Hp, 2.0)-1.0))

	m := V - C
	r, g, b := 0.0, 0.0, 0.0

	switch {
	case 0.0 <= Hp && Hp < 1.0:
		r = C
		g = X
	case 1.0 <= Hp && Hp < 2.0:
		r = X
		g = C
	case 2.0 <= Hp && Hp < 3.0:
		g = C
		b = X
	case 3.0 <= Hp && Hp < 4.0:
		g = X
		b = C
	case 4.0 <= Hp && Hp < 5.0:
		r = X
		b = C
	case 5.0 <= Hp && Hp < 6.0:
		r = C
		b = X
	}

	return Color{m + r, m + g, m + b}
}

// You don't really want to use this, do you? Go for BlendLab, BlendLuv or BlendHcl.
func (c1 Color) BlendHsv(c2 Color, t float64) Color { h1, s1, v1 := c1.Hsv() h2, s2, v2 := c2.Hsv() // https://github.com/lucasb-eyer/go-colorful/pull/60 if s1 == 0 && s2 != 0 { h1 = h2 } else if s2 == 0 && s1 != 0 { h2 = h1 } // We know that h are both in [0..360] return Hsv(interp_angle(h1, h2, t), s1+t*(s2-s1), v1+t*(v2-v1)) } /// HSL /// /////////// // Hsl returns the Hue [0..359], Saturation [0..1], and Luminance (lightness) [0..1] of the color. func (col Color) Hsl() (h, s, l float64) { min := math.Min(math.Min(col.R, col.G), col.B) max := math.Max(math.Max(col.R, col.G), col.B) l = (max + min) / 2 if min == max { s = 0 h = 0 } else { if l < 0.5 { s = (max - min) / (max + min) } else { s = (max - min) / (2.0 - max - min) } if max == col.R { h = (col.G - col.B) / (max - min) } else if max == col.G { h = 2.0 + (col.B-col.R)/(max-min) } else { h = 4.0 + (col.R-col.G)/(max-min) } h *= 60 if h < 0 { h += 360 } } return } // Hsl creates a new Color given a Hue in [0..359], a Saturation [0..1], and a Luminance (lightness) in [0..1] func Hsl(h, s, l float64) Color { if s == 0 { return Color{l, l, l} } var r, g, b float64 var t1 float64 var t2 float64 var tr float64 var tg float64 var tb float64 if l < 0.5 { t1 = l * (1.0 + s) } else { t1 = l + s - l*s } t2 = 2*l - t1 h /= 360 tr = h + 1.0/3.0 tg = h tb = h - 1.0/3.0 if tr < 0 { tr++ } if tr > 1 { tr-- } if tg < 0 { tg++ } if tg > 1 { tg-- } if tb < 0 { tb++ } if tb > 1 { tb-- } // Red if 6*tr < 1 { r = t2 + (t1-t2)*6*tr } else if 2*tr < 1 { r = t1 } else if 3*tr < 2 { r = t2 + (t1-t2)*(2.0/3.0-tr)*6 } else { r = t2 } // Green if 6*tg < 1 { g = t2 + (t1-t2)*6*tg } else if 2*tg < 1 { g = t1 } else if 3*tg < 2 { g = t2 + (t1-t2)*(2.0/3.0-tg)*6 } else { g = t2 } // Blue if 6*tb < 1 { b = t2 + (t1-t2)*6*tb } else if 2*tb < 1 { b = t1 } else if 3*tb < 2 { b = t2 + (t1-t2)*(2.0/3.0-tb)*6 } else { b = t2 } return Color{r, g, b} } /// Hex /// /////////// // Hex returns the hex "html" representation of the color, as in 
// #ff0080.
func (col Color) Hex() string {
	// Add 0.5 for rounding
	return fmt.Sprintf("#%02x%02x%02x", uint8(col.R*255.0+0.5), uint8(col.G*255.0+0.5), uint8(col.B*255.0+0.5))
}

// Hex parses a "html" hex color-string, either in the 3 "#f0c" or 6 "#ff1034" digits form.
func Hex(scol string) (Color, error) {
	if scol == "" || scol[0] != '#' {
		return Color{}, fmt.Errorf("color: %v is not a hex-color", scol)
	}

	var c Color
	var err error
	switch len(scol) {
	case 4:
		// Short form: one hex digit per channel. bits=4 makes ParseUint
		// reject anything above 0xf; values scale by 1/15.
		c, err = parseHexColor(scol[1:2], scol[2:3], scol[3:4], 4, 1.0/15.0)
	case 7:
		// Long form: two hex digits per channel, scaled by 1/255.
		c, err = parseHexColor(scol[1:3], scol[3:5], scol[5:7], 8, 1.0/255.0)
	default:
		return Color{}, fmt.Errorf("color: %v is not a hex-color", scol)
	}

	if err != nil {
		return Color{}, fmt.Errorf("color: %v is not a hex-color: %w", scol, err)
	}
	return c, nil
}

// parseHexColor parses the three per-channel substrings as base-16 integers
// of the given bit width and scales each by factor into [0..1].
func parseHexColor(r, g, b string, bits int, factor float64) (Color, error) {
	var c Color
	var v uint64
	var err error
	if v, err = strconv.ParseUint(r, 16, bits); err != nil {
		return Color{}, err
	}
	c.R = float64(v) * factor
	if v, err = strconv.ParseUint(g, 16, bits); err != nil {
		return Color{}, err
	}
	c.G = float64(v) * factor
	if v, err = strconv.ParseUint(b, 16, bits); err != nil {
		return Color{}, err
	}
	c.B = float64(v) * factor
	return c, err
}

/// Linear ///
//////////////

// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/
// http://www.brucelindbloom.com/Eqn_RGB_to_XYZ.html

// linearize converts a single sRGB channel value to linear-light, using the
// standard sRGB piecewise transfer function.
func linearize(v float64) float64 {
	if v <= 0.04045 {
		return v / 12.92
	}
	return math.Pow((v+0.055)/1.055, 2.4)
}

// LinearRgb converts the color into the linear RGB space (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func (col Color) LinearRgb() (r, g, b float64) {
	r = linearize(col.R)
	g = linearize(col.G)
	b = linearize(col.B)
	return
}

// A much faster and still quite precise linearization using a 6th-order Taylor approximation.
// See the accompanying Jupyter notebook for derivation of the constants.
func linearize_fast(v float64) float64 {
	// Polynomial in powers of (v - 0.5); coefficients come from the
	// derivation notebook mentioned above.
	v1 := v - 0.5
	v2 := v1 * v1
	v3 := v2 * v1
	v4 := v2 * v2
	// v5 := v3*v2
	return -0.248750514614486 + 0.925583310193438*v + 1.16740237321695*v2 + 0.280457026598666*v3 - 0.0757991963780179*v4
	//+ 0.0437040411548932*v5
}

// FastLinearRgb is much faster than and almost as accurate as LinearRgb.
// BUT it is important to NOTE that they only produce good results for valid colors r,g,b in [0,1].
func (col Color) FastLinearRgb() (r, g, b float64) {
	r = linearize_fast(col.R)
	g = linearize_fast(col.G)
	b = linearize_fast(col.B)
	return
}

// delinearize converts a single linear-light channel value back to sRGB,
// inverting linearize.
func delinearize(v float64) float64 {
	if v <= 0.0031308 {
		return 12.92 * v
	}
	return 1.055*math.Pow(v, 1.0/2.4) - 0.055
}

// LinearRgb creates an sRGB color out of the given linear RGB color (see http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/).
func LinearRgb(r, g, b float64) Color {
	return Color{delinearize(r), delinearize(g), delinearize(b)}
}

// delinearize_fast is a fast polynomial approximation of delinearize, split
// into three ranges because the fractional root is hard to fit globally.
func delinearize_fast(v float64) float64 {
	// This function (fractional root) is much harder to linearize, so we need to split.
	if v > 0.2 {
		v1 := v - 0.6
		v2 := v1 * v1
		v3 := v2 * v1
		v4 := v2 * v2
		v5 := v3 * v2
		return 0.442430344268235 + 0.592178981271708*v - 0.287864782562636*v2 + 0.253214392068985*v3 - 0.272557158129811*v4 + 0.325554383321718*v5
	} else if v > 0.03 {
		v1 := v - 0.115
		v2 := v1 * v1
		v3 := v2 * v1
		v4 := v2 * v2
		v5 := v3 * v2
		return 0.194915592891669 + 1.55227076330229*v - 3.93691860257828*v2 + 18.0679839248761*v3 - 101.468750302746*v4 + 632.341487393927*v5
	} else {
		v1 := v - 0.015
		v2 := v1 * v1
		v3 := v2 * v1
		v4 := v2 * v2
		v5 := v3 * v2
		// You can clearly see from the involved constants that the low-end is highly nonlinear.
		return 0.0519565234928877 + 5.09316778537561*v - 99.0338180489702*v2 + 3484.52322764895*v3 - 150028.083412663*v4 + 7168008.42971613*v5
	}
}

// FastLinearRgb is much faster than and almost as accurate as LinearRgb.
// BUT it is important to NOTE that they only produce good results for valid inputs r,g,b in [0,1].
func FastLinearRgb(r, g, b float64) Color {
	return Color{delinearize_fast(r), delinearize_fast(g), delinearize_fast(b)}
}

// XyzToLinearRgb converts from CIE XYZ-space to Linear RGB space.
func XyzToLinearRgb(x, y, z float64) (r, g, b float64) {
	r = 3.2409699419045214*x - 1.5373831775700935*y - 0.49861076029300328*z
	g = -0.96924363628087983*x + 1.8759675015077207*y + 0.041555057407175613*z
	b = 0.055630079696993609*x - 0.20397695888897657*y + 1.0569715142428786*z
	return
}

// LinearRgbToXyz converts from Linear RGB space to CIE XYZ-space
// (the inverse matrix of XyzToLinearRgb).
func LinearRgbToXyz(r, g, b float64) (x, y, z float64) {
	x = 0.41239079926595948*r + 0.35758433938387796*g + 0.18048078840183429*b
	y = 0.21263900587151036*r + 0.71516867876775593*g + 0.072192315360733715*b
	z = 0.019330818715591851*r + 0.11919477979462599*g + 0.95053215224966058*b
	return
}

// BlendLinearRgb blends two colors in the Linear RGB color-space.
// Unlike BlendRgb, this will not produce dark color around the center.
// t == 0 results in c1, t == 1 results in c2
func (c1 Color) BlendLinearRgb(c2 Color, t float64) Color {
	r1, g1, b1 := c1.LinearRgb()
	r2, g2, b2 := c2.LinearRgb()
	return LinearRgb(
		r1+t*(r2-r1),
		g1+t*(g2-g1),
		b1+t*(b2-b1),
	)
}

/// XYZ ///
///////////

// http://www.sjbrown.co.uk/2004/05/14/gamma-correct-rendering/

// Xyz converts the color to CIE XYZ-space.
func (col Color) Xyz() (x, y, z float64) {
	return LinearRgbToXyz(col.LinearRgb())
}

// Xyz creates a Color from CIE XYZ coordinates.
func Xyz(x, y, z float64) Color {
	return LinearRgb(XyzToLinearRgb(x, y, z))
}

/// xyY ///
///////////

// http://www.brucelindbloom.com/Eqn_XYZ_to_xyY.html

// Well, the name is bad, since it's xyY but Golang needs me to start with a
// capital letter to make the method public.
func XyzToXyy(X, Y, Z float64) (x, y, Yout float64) {
	return XyzToXyyWhiteRef(X, Y, Z, D65)
}

func XyzToXyyWhiteRef(X, Y, Z float64, wref [3]float64) (x, y, Yout float64) {
	Yout = Y
	N := X + Y + Z
	if math.Abs(N) < 1e-14 {
		// When we have black, Bruce Lindbloom recommends to use
		// the reference white's chromacity for x and y.
		x = wref[0] / (wref[0] + wref[1] + wref[2])
		y = wref[1] / (wref[0] + wref[1] + wref[2])
	} else {
		x = X / N
		y = Y / N
	}
	return
}

// XyyToXyz converts CIE xyY back to XYZ; a y chromaticity of (almost) zero
// maps to black to avoid dividing by zero.
func XyyToXyz(x, y, Y float64) (X, Yout, Z float64) {
	Yout = Y

	if -1e-14 < y && y < 1e-14 {
		X = 0.0
		Z = 0.0
	} else {
		X = Y / y * x
		Z = Y / y * (1.0 - x - y)
	}

	return
}

// Converts the given color to CIE xyY space using D65 as reference white.
// (Note that the reference white is only used for black input.)
// x, y and Y are in [0..1]
func (col Color) Xyy() (x, y, Y float64) {
	return XyzToXyy(col.Xyz())
}

// Converts the given color to CIE xyY space, taking into account
// a given reference white. (i.e. the monitor's white)
// (Note that the reference white is only used for black input.)
// x, y and Y are in [0..1]
func (col Color) XyyWhiteRef(wref [3]float64) (x, y, Y float64) {
	X, Y2, Z := col.Xyz()
	return XyzToXyyWhiteRef(X, Y2, Z, wref)
}

// Generates a color by using data given in CIE xyY space.
// x, y and Y are in [0..1]
func Xyy(x, y, Y float64) Color {
	return Xyz(XyyToXyz(x, y, Y))
}

/// L*a*b* ///
//////////////

// http://en.wikipedia.org/wiki/Lab_color_space#CIELAB-CIEXYZ_conversions
// For L*a*b*, we need to L*a*b*<->XYZ->RGB and the first one is device dependent.

// lab_f is the CIE forward transfer function; the branch threshold is (6/29)^3.
func lab_f(t float64) float64 {
	if t > 6.0/29.0*6.0/29.0*6.0/29.0 {
		return math.Cbrt(t)
	}
	return t/3.0*29.0/6.0*29.0/6.0 + 4.0/29.0
}

func XyzToLab(x, y, z float64) (l, a, b float64) {
	// Use D65 white as reference point by default.
	// http://www.fredmiranda.com/forum/topic/1035332
	// http://en.wikipedia.org/wiki/Standard_illuminant
	return XyzToLabWhiteRef(x, y, z, D65)
}

func XyzToLabWhiteRef(x, y, z float64, wref [3]float64) (l, a, b float64) {
	fy := lab_f(y / wref[1])
	l = 1.16*fy - 0.16
	a = 5.0 * (lab_f(x/wref[0]) - fy)
	b = 2.0 * (fy - lab_f(z/wref[2]))
	return
}

// lab_finv is the inverse of lab_f; the branch threshold is 6/29.
func lab_finv(t float64) float64 {
	if t > 6.0/29.0 {
		return t * t * t
	}
	return 3.0 * 6.0 / 29.0 * 6.0 / 29.0 * (t - 4.0/29.0)
}

func LabToXyz(l, a, b float64) (x, y, z float64) {
	// D65 white (see above).
	return LabToXyzWhiteRef(l, a, b, D65)
}

func LabToXyzWhiteRef(l, a, b float64, wref [3]float64) (x, y, z float64) {
	l2 := (l + 0.16) / 1.16
	x = wref[0] * lab_finv(l2+a/5.0)
	y = wref[1] * lab_finv(l2)
	z = wref[2] * lab_finv(l2-b/2.0)
	return
}

// Converts the given color to CIE L*a*b* space using D65 as reference white.
func (col Color) Lab() (l, a, b float64) {
	return XyzToLab(col.Xyz())
}

// Converts the given color to CIE L*a*b* space, taking into account
// a given reference white. (i.e. the monitor's white)
func (col Color) LabWhiteRef(wref [3]float64) (l, a, b float64) {
	x, y, z := col.Xyz()
	return XyzToLabWhiteRef(x, y, z, wref)
}

// Generates a color by using data given in CIE L*a*b* space using D65 as reference white.
// WARNING: many combinations of `l`, `a`, and `b` values do not have corresponding
// valid RGB values, check the FAQ in the README if you're unsure.
func Lab(l, a, b float64) Color {
	return Xyz(LabToXyz(l, a, b))
}

// Generates a color by using data given in CIE L*a*b* space, taking
// into account a given reference white. (i.e. the monitor's white)
func LabWhiteRef(l, a, b float64, wref [3]float64) Color {
	return Xyz(LabToXyzWhiteRef(l, a, b, wref))
}

// DistanceLab is a good measure of visual similarity between two colors!
// A result of 0 would mean identical colors, while a result of 1 or higher
// means the colors differ a lot.
func (c1 Color) DistanceLab(c2 Color) float64 {
	l1, a1, b1 := c1.Lab()
	l2, a2, b2 := c2.Lab()
	return math.Sqrt(sq(l1-l2) + sq(a1-a2) + sq(b1-b2))
}

// DistanceCIE76 is the same as DistanceLab.
func (c1 Color) DistanceCIE76(c2 Color) float64 {
	return c1.DistanceLab(c2)
}

// Uses the CIE94 formula to calculate color distance. More accurate than
// DistanceLab, but also more work.
func (cl Color) DistanceCIE94(cr Color) float64 {
	l1, a1, b1 := cl.Lab()
	l2, a2, b2 := cr.Lab()

	// NOTE: Since all those formulas expect L,a,b values 100x larger than we
	// have them in this library, we either need to adjust all constants
	// in the formula, or convert the ranges of L,a,b before, and then
	// scale the distances down again. The latter is less error-prone.
	l1, a1, b1 = l1*100.0, a1*100.0, b1*100.0
	l2, a2, b2 = l2*100.0, a2*100.0, b2*100.0

	// Weighting factors for the "graphic arts" variant of CIE94.
	kl := 1.0 // 2.0 for textiles
	kc := 1.0
	kh := 1.0
	k1 := 0.045 // 0.048 for textiles
	k2 := 0.015 // 0.014 for textiles.

	deltaL := l1 - l2
	c1 := math.Sqrt(sq(a1) + sq(b1))
	c2 := math.Sqrt(sq(a2) + sq(b2))
	deltaCab := c1 - c2

	// Not taking Sqrt here for stability, and it's unnecessary.
	deltaHab2 := sq(a1-a2) + sq(b1-b2) - sq(deltaCab)
	sl := 1.0
	sc := 1.0 + k1*c1
	sh := 1.0 + k2*c1

	vL2 := sq(deltaL / (kl * sl))
	vC2 := sq(deltaCab / (kc * sc))
	vH2 := deltaHab2 / sq(kh*sh)

	return math.Sqrt(vL2+vC2+vH2) * 0.01 // See above.
}

// DistanceCIEDE2000 uses the Delta E 2000 formula to calculate color
// distance. It is more expensive but more accurate than both DistanceLab
// and DistanceCIE94.
func (cl Color) DistanceCIEDE2000(cr Color) float64 {
	return cl.DistanceCIEDE2000klch(cr, 1.0, 1.0, 1.0)
}

// DistanceCIEDE2000klch uses the Delta E 2000 formula with custom values
// for the weighting factors kL, kC, and kH.
func (cl Color) DistanceCIEDE2000klch(cr Color, kl, kc, kh float64) float64 {
	l1, a1, b1 := cl.Lab()
	l2, a2, b2 := cr.Lab()

	// As with CIE94, we scale up the ranges of L,a,b beforehand and scale
	// them down again afterwards.
	l1, a1, b1 = l1*100.0, a1*100.0, b1*100.0
	l2, a2, b2 = l2*100.0, a2*100.0, b2*100.0

	cab1 := math.Sqrt(sq(a1) + sq(b1))
	cab2 := math.Sqrt(sq(a2) + sq(b2))
	cabmean := (cab1 + cab2) / 2

	// a' correction factor g compensates for the blue-region non-uniformity.
	g := 0.5 * (1 - math.Sqrt(math.Pow(cabmean, 7)/(math.Pow(cabmean, 7)+math.Pow(25, 7))))
	ap1 := (1 + g) * a1
	ap2 := (1 + g) * a2
	cp1 := math.Sqrt(sq(ap1) + sq(b1))
	cp2 := math.Sqrt(sq(ap2) + sq(b2))

	// Hue angles h' in degrees, normalized to [0, 360).
	hp1 := 0.0
	if b1 != ap1 || ap1 != 0 {
		hp1 = math.Atan2(b1, ap1)
		if hp1 < 0 {
			hp1 += math.Pi * 2
		}
		hp1 *= 180 / math.Pi
	}
	hp2 := 0.0
	if b2 != ap2 || ap2 != 0 {
		hp2 = math.Atan2(b2, ap2)
		if hp2 < 0 {
			hp2 += math.Pi * 2
		}
		hp2 *= 180 / math.Pi
	}

	deltaLp := l2 - l1
	deltaCp := cp2 - cp1
	dhp := 0.0
	cpProduct := cp1 * cp2
	if cpProduct != 0 {
		// Shortest angular difference between the two hues.
		dhp = hp2 - hp1
		if dhp > 180 {
			dhp -= 360
		} else if dhp < -180 {
			dhp += 360
		}
	}
	deltaHp := 2 * math.Sqrt(cpProduct) * math.Sin(dhp/2*math.Pi/180)

	lpmean := (l1 + l2) / 2
	cpmean := (cp1 + cp2) / 2
	hpmean := hp1 + hp2
	if cpProduct != 0 {
		// Mean hue, wrapped so it lies between the two input hues.
		hpmean /= 2
		if math.Abs(hp1-hp2) > 180 {
			if hp1+hp2 < 360 {
				hpmean += 180
			} else {
				hpmean -= 180
			}
		}
	}

	t := 1 - 0.17*math.Cos((hpmean-30)*math.Pi/180) + 0.24*math.Cos(2*hpmean*math.Pi/180) + 0.32*math.Cos((3*hpmean+6)*math.Pi/180) - 0.2*math.Cos((4*hpmean-63)*math.Pi/180)
	deltaTheta := 30 * math.Exp(-sq((hpmean-275)/25))
	rc := 2 * math.Sqrt(math.Pow(cpmean, 7)/(math.Pow(cpmean, 7)+math.Pow(25, 7)))
	sl := 1 + (0.015*sq(lpmean-50))/math.Sqrt(20+sq(lpmean-50))
	sc := 1 + 0.045*cpmean
	sh := 1 + 0.015*cpmean*t
	rt := -math.Sin(2*deltaTheta*math.Pi/180) * rc

	return math.Sqrt(sq(deltaLp/(kl*sl))+sq(deltaCp/(kc*sc))+sq(deltaHp/(kh*sh))+rt*(deltaCp/(kc*sc))*(deltaHp/(kh*sh))) * 0.01
}

// BlendLab blends two colors in the L*a*b* color-space, which should result in a smoother blend.
// t == 0 results in c1, t == 1 results in c2 func (c1 Color) BlendLab(c2 Color, t float64) Color { l1, a1, b1 := c1.Lab() l2, a2, b2 := c2.Lab() return Lab(l1+t*(l2-l1), a1+t*(a2-a1), b1+t*(b2-b1)) } /// L*u*v* /// ////////////// // http://en.wikipedia.org/wiki/CIELUV#XYZ_.E2.86.92_CIELUV_and_CIELUV_.E2.86.92_XYZ_conversions // For L*u*v*, we need to L*u*v*<->XYZ<->RGB and the first one is device dependent. func XyzToLuv(x, y, z float64) (l, a, b float64) { // Use D65 white as reference point by default. // http://www.fredmiranda.com/forum/topic/1035332 // http://en.wikipedia.org/wiki/Standard_illuminant return XyzToLuvWhiteRef(x, y, z, D65) } func XyzToLuvWhiteRef(x, y, z float64, wref [3]float64) (l, u, v float64) { if y/wref[1] <= 6.0/29.0*6.0/29.0*6.0/29.0 { l = y / wref[1] * (29.0 / 3.0 * 29.0 / 3.0 * 29.0 / 3.0) / 100.0 } else { l = 1.16*math.Cbrt(y/wref[1]) - 0.16 } ubis, vbis := xyz_to_uv(x, y, z) un, vn := xyz_to_uv(wref[0], wref[1], wref[2]) u = 13.0 * l * (ubis - un) v = 13.0 * l * (vbis - vn) return } // For this part, we do as R's graphics.hcl does, not as wikipedia does. // Or is it the same? func xyz_to_uv(x, y, z float64) (u, v float64) { denom := x + 15.0*y + 3.0*z if denom == 0.0 { u, v = 0.0, 0.0 } else { u = 4.0 * x / denom v = 9.0 * y / denom } return } func LuvToXyz(l, u, v float64) (x, y, z float64) { // D65 white (see above). return LuvToXyzWhiteRef(l, u, v, D65) } func LuvToXyzWhiteRef(l, u, v float64, wref [3]float64) (x, y, z float64) { // y = wref[1] * lab_finv((l + 0.16) / 1.16) if l <= 0.08 { y = wref[1] * l * 100.0 * 3.0 / 29.0 * 3.0 / 29.0 * 3.0 / 29.0 } else { y = wref[1] * cub((l+0.16)/1.16) } un, vn := xyz_to_uv(wref[0], wref[1], wref[2]) if l != 0.0 { ubis := u/(13.0*l) + un vbis := v/(13.0*l) + vn x = y * 9.0 * ubis / (4.0 * vbis) z = y * (12.0 - 3.0*ubis - 20.0*vbis) / (4.0 * vbis) } else { x, y = 0.0, 0.0 } return } // Converts the given color to CIE L*u*v* space using D65 as reference white. 
// L* is in [0..1] and both u* and v* are in about [-1..1] func (col Color) Luv() (l, u, v float64) { return XyzToLuv(col.Xyz()) } // Converts the given color to CIE L*u*v* space, taking into account // a given reference white. (i.e. the monitor's white) // L* is in [0..1] and both u* and v* are in about [-1..1] func (col Color) LuvWhiteRef(wref [3]float64) (l, u, v float64) { x, y, z := col.Xyz() return XyzToLuvWhiteRef(x, y, z, wref) } // Generates a color by using data given in CIE L*u*v* space using D65 as reference white. // L* is in [0..1] and both u* and v* are in about [-1..1] // WARNING: many combinations of `l`, `u`, and `v` values do not have corresponding // valid RGB values, check the FAQ in the README if you're unsure. func Luv(l, u, v float64) Color { return Xyz(LuvToXyz(l, u, v)) } // Generates a color by using data given in CIE L*u*v* space, taking // into account a given reference white. (i.e. the monitor's white) // L* is in [0..1] and both u* and v* are in about [-1..1] func LuvWhiteRef(l, u, v float64, wref [3]float64) Color { return Xyz(LuvToXyzWhiteRef(l, u, v, wref)) } // DistanceLuv is a good measure of visual similarity between two colors! // A result of 0 would mean identical colors, while a result of 1 or higher // means the colors differ a lot. func (c1 Color) DistanceLuv(c2 Color) float64 { l1, u1, v1 := c1.Luv() l2, u2, v2 := c2.Luv() return math.Sqrt(sq(l1-l2) + sq(u1-u2) + sq(v1-v2)) } // BlendLuv blends two colors in the CIE-L*u*v* color-space, which should result in a smoother blend. // t == 0 results in c1, t == 1 results in c2 func (c1 Color) BlendLuv(c2 Color, t float64) Color { l1, u1, v1 := c1.Luv() l2, u2, v2 := c2.Luv() return Luv(l1+t*(l2-l1), u1+t*(u2-u1), v1+t*(v2-v1)) } /// HCL /// /////////// // HCL is nothing else than L*a*b* in cylindrical coordinates! // (this was wrong on English wikipedia, I fixed it, let's hope the fix stays.) 
// But it is widely popular since it is a "correct HSV" // http://www.hunterlab.com/appnotes/an09_96a.pdf // Converts the given color to HCL space using D65 as reference white. // H values are in [0..360], C and L values are in [0..1] although C can overshoot 1.0 func (col Color) Hcl() (h, c, l float64) { return col.HclWhiteRef(D65) } func LabToHcl(L, a, b float64) (h, c, l float64) { // Oops, floating point workaround necessary if a ~= b and both are very small (i.e. almost zero). if math.Abs(b-a) > 1e-4 && math.Abs(a) > 1e-4 { h = math.Mod(57.29577951308232087721*math.Atan2(b, a)+360.0, 360.0) // Rad2Deg } else { h = 0.0 } c = math.Sqrt(sq(a) + sq(b)) l = L return } // Converts the given color to HCL space, taking into account // a given reference white. (i.e. the monitor's white) // H values are in [0..360], C and L values are in [0..1] func (col Color) HclWhiteRef(wref [3]float64) (h, c, l float64) { L, a, b := col.LabWhiteRef(wref) return LabToHcl(L, a, b) } // Generates a color by using data given in HCL space using D65 as reference white. // H values are in [0..360], C and L values are in [0..1] // WARNING: many combinations of `h`, `c`, and `l` values do not have corresponding // valid RGB values, check the FAQ in the README if you're unsure. func Hcl(h, c, l float64) Color { return HclWhiteRef(h, c, l, D65) } func HclToLab(h, c, l float64) (L, a, b float64) { H := 0.01745329251994329576 * h // Deg2Rad a = c * math.Cos(H) b = c * math.Sin(H) L = l return } // Generates a color by using data given in HCL space, taking // into account a given reference white. (i.e. the monitor's white) // H values are in [0..360], C and L values are in [0..1] func HclWhiteRef(h, c, l float64, wref [3]float64) Color { L, a, b := HclToLab(h, c, l) return LabWhiteRef(L, a, b, wref) } // BlendHcl blends two colors in the CIE-L*C*h° color-space, which should result in a smoother blend. 
// t == 0 results in c1, t == 1 results in c2
func (col1 Color) BlendHcl(col2 Color, t float64) Color {
	h1, c1, l1 := col1.Hcl()
	h2, c2, l2 := col2.Hcl()

	// Near-grey colors have an undefined hue; borrow it from the other
	// end-point so the blend does not swing through arbitrary hues.
	// https://github.com/lucasb-eyer/go-colorful/pull/60
	if c1 <= 0.00015 && c2 >= 0.00015 {
		h1 = h2
	} else if c2 <= 0.00015 && c1 >= 0.00015 {
		h2 = h1
	}

	// We know that h are both in [0..360]
	return Hcl(interp_angle(h1, h2, t), c1+t*(c2-c1), l1+t*(l2-l1)).Clamped()
}

// LuvLch

// Converts the given color to LuvLCh space using D65 as reference white.
// h values are in [0..360], C and L values are in [0..1] although C can overshoot 1.0
func (col Color) LuvLCh() (l, c, h float64) {
	return col.LuvLChWhiteRef(D65)
}

// LuvToLuvLCh converts L*u*v* coordinates to their cylindrical (LCh) form.
func LuvToLuvLCh(L, u, v float64) (l, c, h float64) {
	// Oops, floating point workaround necessary if u ~= v and both are very small (i.e. almost zero).
	if math.Abs(v-u) > 1e-4 && math.Abs(u) > 1e-4 {
		h = math.Mod(57.29577951308232087721*math.Atan2(v, u)+360.0, 360.0) // Rad2Deg
	} else {
		h = 0.0
	}
	l = L
	c = math.Sqrt(sq(u) + sq(v))
	return
}

// Converts the given color to LuvLCh space, taking into account
// a given reference white. (i.e. the monitor's white)
// h values are in [0..360], c and l values are in [0..1]
func (col Color) LuvLChWhiteRef(wref [3]float64) (l, c, h float64) {
	return LuvToLuvLCh(col.LuvWhiteRef(wref))
}

// Generates a color by using data given in LuvLCh space using D65 as reference white.
// h values are in [0..360], C and L values are in [0..1]
// WARNING: many combinations of `l`, `c`, and `h` values do not have corresponding
// valid RGB values, check the FAQ in the README if you're unsure.
func LuvLCh(l, c, h float64) Color {
	return LuvLChWhiteRef(l, c, h, D65)
}

// LuvLChToLuv converts cylindrical LCh coordinates back to L*u*v*.
func LuvLChToLuv(l, c, h float64) (L, u, v float64) {
	H := 0.01745329251994329576 * h // Deg2Rad
	u = c * math.Cos(H)
	v = c * math.Sin(H)
	L = l
	return
}

// Generates a color by using data given in LuvLCh space, taking
// into account a given reference white. (i.e. the monitor's white)
// h values are in [0..360], C and L values are in [0..1]
func LuvLChWhiteRef(l, c, h float64, wref [3]float64) Color {
	L, u, v := LuvLChToLuv(l, c, h)
	return LuvWhiteRef(L, u, v, wref)
}

// BlendLuvLCh blends two colors in the cylindrical CIELUV color space.
// t == 0 results in c1, t == 1 results in c2
func (col1 Color) BlendLuvLCh(col2 Color, t float64) Color {
	l1, c1, h1 := col1.LuvLCh()
	l2, c2, h2 := col2.LuvLCh()

	// We know that h are both in [0..360]
	return LuvLCh(l1+t*(l2-l1), c1+t*(c2-c1), interp_angle(h1, h2, t))
}

/// OkLab ///
///////////

// OkLab converts the color to the OkLab color space.
func (col Color) OkLab() (l, a, b float64) {
	return XyzToOkLab(col.Xyz())
}

// OkLab builds a color from OkLab coordinates.
func OkLab(l, a, b float64) Color {
	return Xyz(OkLabToXyz(l, a, b))
}

// XyzToOkLab converts XYZ coordinates to OkLab
// (XYZ -> cone-response LMS -> cube root -> Lab; fixed matrix coefficients
// from the OkLab reference).
func XyzToOkLab(x, y, z float64) (l, a, b float64) {
	l_ := math.Cbrt(0.8189330101*x + 0.3618667424*y - 0.1288597137*z)
	m_ := math.Cbrt(0.0329845436*x + 0.9293118715*y + 0.0361456387*z)
	s_ := math.Cbrt(0.0482003018*x + 0.2643662691*y + 0.6338517070*z)

	l = 0.2104542553*l_ + 0.7936177850*m_ - 0.0040720468*s_
	a = 1.9779984951*l_ - 2.4285922050*m_ + 0.4505937099*s_
	b = 0.0259040371*l_ + 0.7827717662*m_ - 0.8086757660*s_
	return
}

// OkLabToXyz converts OkLab coordinates back to XYZ (inverse of XyzToOkLab).
func OkLabToXyz(l, a, b float64) (x, y, z float64) {
	l_ := 0.9999999984505196*l + 0.39633779217376774*a + 0.2158037580607588*b
	m_ := 1.0000000088817607*l - 0.10556134232365633*a - 0.0638541747717059*b
	s_ := 1.0000000546724108*l - 0.08948418209496574*a - 1.2914855378640917*b

	ll := math.Pow(l_, 3)
	m := math.Pow(m_, 3)
	s := math.Pow(s_, 3)

	x = 1.2268798733741557*ll - 0.5578149965554813*m + 0.28139105017721594*s
	y = -0.04057576262431372*ll + 1.1122868293970594*m - 0.07171106666151696*s
	z = -0.07637294974672142*ll - 0.4214933239627916*m + 1.5869240244272422*s
	return
}

// BlendOkLab blends two colors in the OkLab color-space, which should result in a better blend (even compared to BlendLab).
func (c1 Color) BlendOkLab(c2 Color, t float64) Color { l1, a1, b1 := c1.OkLab() l2, a2, b2 := c2.OkLab() return OkLab(l1+t*(l2-l1), a1+t*(a2-a1), b1+t*(b2-b1)) } /// OkLch /// /////////// func (col Color) OkLch() (l, c, h float64) { return OkLabToOkLch(col.OkLab()) } func OkLch(l, c, h float64) Color { return Xyz(OkLchToXyz(l, c, h)) } func XyzToOkLch(x, y, z float64) (float64, float64, float64) { l, c, h := OkLabToOkLch(XyzToOkLab(x, y, z)) return l, c, h } func OkLchToXyz(l, c, h float64) (float64, float64, float64) { x, y, z := OkLabToXyz(OkLchToOkLab(l, c, h)) return x, y, z } func OkLabToOkLch(l, a, b float64) (float64, float64, float64) { c := math.Sqrt((a * a) + (b * b)) h := math.Atan2(b, a) if h < 0 { h += 2 * math.Pi } return l, c, h * 180 / math.Pi } func OkLchToOkLab(l, c, h float64) (float64, float64, float64) { h *= math.Pi / 180 a := c * math.Cos(h) b := c * math.Sin(h) return l, a, b } // BlendOkLch blends two colors in the OkLch color-space, which should result in a better blend (even compared to BlendHcl). func (col1 Color) BlendOkLch(col2 Color, t float64) Color { l1, c1, h1 := col1.OkLch() l2, c2, h2 := col2.OkLch() // https://github.com/lucasb-eyer/go-colorful/pull/60 if c1 <= 0.00015 && c2 >= 0.00015 { h1 = h2 } else if c2 <= 0.00015 && c1 >= 0.00015 { h2 = h1 } // We know that h are both in [0..360] return OkLch(l1+t*(l2-l1), c1+t*(c2-c1), interp_angle(h1, h2, t)).Clamped() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go
vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go
package colorful import ( "database/sql/driver" "encoding/json" "fmt" "reflect" ) // A HexColor is a Color stored as a hex string "#rrggbb". It implements the // database/sql.Scanner, database/sql/driver.Value, // encoding/json.Unmarshaler and encoding/json.Marshaler interfaces. type HexColor Color type errUnsupportedType struct { got interface{} want reflect.Type } func (hc *HexColor) Scan(value interface{}) error { s, ok := value.(string) if !ok { return errUnsupportedType{got: reflect.TypeOf(value), want: reflect.TypeOf("")} } c, err := Hex(s) if err != nil { return err } *hc = HexColor(c) return nil } func (hc *HexColor) Value() (driver.Value, error) { return Color(*hc).Hex(), nil } func (e errUnsupportedType) Error() string { return fmt.Sprintf("unsupported type: got %v, want a %s", e.got, e.want) } func (hc *HexColor) UnmarshalJSON(data []byte) error { var hexCode string if err := json.Unmarshal(data, &hexCode); err != nil { return err } var col, err = Hex(hexCode) if err != nil { return err } *hc = HexColor(col) return nil } func (hc HexColor) MarshalJSON() ([]byte, error) { return json.Marshal(Color(hc).Hex()) } // Decode - deserialize function for https://github.com/kelseyhightower/envconfig func (hc *HexColor) Decode(hexCode string) error { var col, err = Hex(hexCode) if err != nil { return err } *hc = HexColor(col) return nil } func (hc HexColor) MarshalYAML() (interface{}, error) { return Color(hc).Hex(), nil } func (hc *HexColor) UnmarshalYAML(unmarshal func(interface{}) error) error { var hexCode string if err := unmarshal(&hexCode); err != nil { return err } var col, err = Hex(hexCode) if err != nil { return err } *hc = HexColor(col) return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go
vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go
package colorful // Uses the HSV color space to generate colors with similar S,V but distributed // evenly along their Hue. This is fast but not always pretty. // If you've got time to spare, use Lab (the non-fast below). func FastWarmPaletteWithRand(colorsCount int, rand RandInterface) (colors []Color) { colors = make([]Color, colorsCount) for i := 0; i < colorsCount; i++ { colors[i] = Hsv(float64(i)*(360.0/float64(colorsCount)), 0.55+rand.Float64()*0.2, 0.35+rand.Float64()*0.2) } return } func FastWarmPalette(colorsCount int) (colors []Color) { return FastWarmPaletteWithRand(colorsCount, getDefaultGlobalRand()) } func WarmPaletteWithRand(colorsCount int, rand RandInterface) ([]Color, error) { warmy := func(l, a, b float64) bool { _, c, _ := LabToHcl(l, a, b) return 0.1 <= c && c <= 0.4 && 0.2 <= l && l <= 0.5 } return SoftPaletteExWithRand(colorsCount, SoftPaletteSettings{warmy, 50, true}, rand) } func WarmPalette(colorsCount int) ([]Color, error) { return WarmPaletteWithRand(colorsCount, getDefaultGlobalRand()) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/stefanhaller/git-todo-parser/todo/write.go
vendor/github.com/stefanhaller/git-todo-parser/todo/write.go
package todo import ( "io" "strings" ) func Write(f io.Writer, todos []Todo, commentChar byte) error { for _, todo := range todos { if err := writeTodo(f, todo, commentChar); err != nil { return err } } return nil } func writeTodo(f io.Writer, todo Todo, commentChar byte) error { var sb strings.Builder if todo.Command != Comment { sb.WriteString(todo.Command.String()) } switch todo.Command { case NoOp: case Comment: sb.WriteByte(commentChar) sb.WriteString(todo.Comment) case Break: case Label: fallthrough case Reset: sb.WriteByte(' ') sb.WriteString(todo.Label) case Exec: sb.WriteByte(' ') sb.WriteString(todo.ExecCommand) case Merge: sb.WriteByte(' ') if todo.Commit != "" { sb.WriteString(todo.Flag) sb.WriteByte(' ') sb.WriteString(todo.Commit) sb.WriteByte(' ') } sb.WriteString(todo.Label) if todo.Msg != "" { sb.WriteString(" # ") sb.WriteString(todo.Msg) } case Fixup: sb.WriteByte(' ') if todo.Flag != "" { sb.WriteString(todo.Flag) sb.WriteByte(' ') } sb.WriteString(todo.Commit) case UpdateRef: sb.WriteByte(' ') sb.WriteString(todo.Ref) case Pick: fallthrough case Revert: fallthrough case Edit: fallthrough case Reword: fallthrough case Squash: fallthrough case Drop: sb.WriteByte(' ') sb.WriteString(todo.Commit) if todo.Msg != "" { sb.WriteByte(' ') sb.WriteString(todo.Msg) } } sb.WriteByte('\n') _, err := f.Write([]byte(sb.String())) return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/stefanhaller/git-todo-parser/todo/todo.go
vendor/github.com/stefanhaller/git-todo-parser/todo/todo.go
package todo type TodoCommand uint8 const ( Pick TodoCommand = iota + 1 Revert Edit Reword Fixup Squash Exec Break Label Reset Merge NoOp Drop UpdateRef Comment ) type Todo struct { Command TodoCommand Commit string Flag string Comment string ExecCommand string Label string Msg string Ref string } func (t TodoCommand) String() string { return commandToString[t] } var commandToString = map[TodoCommand]string{ Pick: "pick", Revert: "revert", Edit: "edit", Reword: "reword", Fixup: "fixup", Squash: "squash", Exec: "exec", Break: "break", Label: "label", Reset: "reset", Merge: "merge", NoOp: "noop", Drop: "drop", UpdateRef: "update-ref", Comment: "comment", } var todoCommandInfo = [15]struct { nickname string cmd string }{ {"", ""}, // dummy value since we're using 1-based indexing {"p", "pick"}, {"", "revert"}, {"e", "edit"}, {"r", "reword"}, {"f", "fixup"}, {"s", "squash"}, {"x", "exec"}, {"b", "break"}, {"l", "label"}, {"t", "reset"}, {"m", "merge"}, {"", "noop"}, {"d", "drop"}, {"u", "update-ref"}, }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/stefanhaller/git-todo-parser/todo/parse.go
vendor/github.com/stefanhaller/git-todo-parser/todo/parse.go
package todo

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"strings"
)

// Sentinel errors returned (wrapped) by Parse for malformed todo lines.
var (
	ErrUnexpectedCommand = errors.New("unexpected command")
	ErrMissingLabel      = errors.New("missing label")
	ErrMissingCommit     = errors.New("missing commit")
	ErrMissingExecCmd    = errors.New("missing command for exec")
	ErrMissingRef        = errors.New("missing ref")
)

// Parse reads a git-rebase todo file from f and returns one Todo per
// non-blank line. Lines starting with commentChar become Comment todos.
// On a malformed line it returns an error wrapping one of the sentinel
// errors above together with the offending line.
func Parse(f io.Reader, commentChar byte) ([]Todo, error) {
	var result []Todo

	scanner := bufio.NewScanner(f)
	scanner.Split(bufio.ScanLines)

	for scanner.Scan() {
		line := scanner.Text()

		trimmed := strings.TrimSpace(line)
		if trimmed == "" {
			// Blank lines carry no information; skip them.
			continue
		}

		cmd, err := parseLine(line, commentChar)
		if err != nil {
			return nil, fmt.Errorf("failed to parse line %q: %w", line, err)
		}

		result = append(result, cmd)
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("failed to parse input: %w", err)
	}

	return result, nil
}

// parseLine parses a single non-blank todo line. line must be non-empty
// (Parse guarantees this).
func parseLine(line string, commentChar byte) (Todo, error) {
	var todo Todo

	// Comment lines keep everything after the comment char verbatim.
	if line[0] == commentChar {
		todo.Command = Comment
		todo.Comment = line[1:]
		return todo, nil
	}

	fields := strings.Fields(line)

	// Identify the command word (long or nickname form) and drop it from fields.
	var commandLen int
	for i := Pick; i < Comment; i++ {
		if isCommand(i, fields[0]) {
			todo.Command = i
			commandLen = len(fields[0])
			fields = fields[1:]
			break
		}
	}

	if todo.Command == 0 {
		// Zero value means no command matched.
		return todo, ErrUnexpectedCommand
	}

	if todo.Command == Break || todo.Command == NoOp {
		return todo, nil
	}

	if todo.Command == Label || todo.Command == Reset {
		restOfLine := strings.TrimSpace(line[commandLen:])
		// "reset [new root]" is a special multi-word label emitted by git.
		if todo.Command == Reset && restOfLine == "[new root]" {
			todo.Label = restOfLine
		} else if len(fields) == 0 {
			return todo, ErrMissingLabel
		} else {
			todo.Label = fields[0]
		}
		return todo, nil
	}

	if todo.Command == Exec {
		if len(fields) == 0 {
			return todo, ErrMissingExecCmd
		}
		todo.ExecCommand = strings.Join(fields, " ")
		return todo, nil
	}

	if todo.Command == Merge {
		// BUGFIX: guard the fields[0] access — a bare "merge" line used to
		// panic with an index-out-of-range; it now reports ErrMissingLabel
		// via the length check below.
		if len(fields) > 0 && (fields[0] == "-C" || fields[0] == "-c") {
			todo.Flag = fields[0]
			fields = fields[1:]
			if len(fields) == 0 {
				return todo, ErrMissingCommit
			}
			todo.Commit = fields[0]
			fields = fields[1:]
		}
		if len(fields) == 0 {
			return todo, ErrMissingLabel
		}
		todo.Label = fields[0]
		fields = fields[1:]
		if len(fields) > 0 && fields[0] == "#" {
			fields = fields[1:]
			todo.Msg = strings.Join(fields, " ")
		}
		return todo, nil
	}

	if todo.Command == Fixup {
		if len(fields) == 0 {
			return todo, ErrMissingCommit
		}
		// Skip flags
		if fields[0] == "-C" || fields[0] == "-c" {
			todo.Flag = fields[0]
			fields = fields[1:]
		}
	}

	if todo.Command == UpdateRef {
		if len(fields) == 0 {
			return todo, ErrMissingRef
		}
		todo.Ref = fields[0]
		return todo, nil
	}

	// Remaining commands (pick, revert, edit, reword, squash, drop, and the
	// fixup fall-through above) take "<commit> [<msg>]".
	if len(fields) == 0 {
		return todo, ErrMissingCommit
	}
	todo.Commit = fields[0]
	fields = fields[1:]

	// Trim comment char and whitespace
	todo.Msg = strings.TrimPrefix(strings.Join(fields, " "), fmt.Sprintf("%c ", commentChar))

	return todo, nil
}

// isCommand reports whether s is the long or nickname spelling of command i.
func isCommand(i TodoCommand, s string) bool {
	return len(s) > 0 &&
		(todoCommandInfo[i].cmd == s || todoCommandInfo[i].nickname == s)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/emojipresentation.go
vendor/github.com/rivo/uniseg/emojipresentation.go
// Code generated via go generate from gen_properties.go. DO NOT EDIT. package uniseg // emojiPresentation are taken from // // and // https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) // on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var emojiPresentation = [][3]int{ {0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done {0x23E9, 0x23EC, prEmojiPresentation}, // E0.6 [4] (⏩..⏬) fast-forward button..fast down button {0x23F0, 0x23F0, prEmojiPresentation}, // E0.6 [1] (⏰) alarm clock {0x23F3, 0x23F3, prEmojiPresentation}, // E0.6 [1] (⏳) hourglass not done {0x25FD, 0x25FE, prEmojiPresentation}, // E0.6 [2] (◽..◾) white medium-small square..black medium-small square {0x2614, 0x2615, prEmojiPresentation}, // E0.6 [2] (☔..☕) umbrella with rain drops..hot beverage {0x2648, 0x2653, prEmojiPresentation}, // E0.6 [12] (♈..♓) Aries..Pisces {0x267F, 0x267F, prEmojiPresentation}, // E0.6 [1] (♿) wheelchair symbol {0x2693, 0x2693, prEmojiPresentation}, // E0.6 [1] (⚓) anchor {0x26A1, 0x26A1, prEmojiPresentation}, // E0.6 [1] (⚡) high voltage {0x26AA, 0x26AB, prEmojiPresentation}, // E0.6 [2] (⚪..⚫) white circle..black circle {0x26BD, 0x26BE, prEmojiPresentation}, // E0.6 [2] (⚽..⚾) soccer ball..baseball {0x26C4, 0x26C5, prEmojiPresentation}, // E0.6 [2] (⛄..⛅) snowman without snow..sun behind cloud {0x26CE, 0x26CE, prEmojiPresentation}, // E0.6 [1] (⛎) Ophiuchus {0x26D4, 0x26D4, prEmojiPresentation}, // E0.6 [1] (⛔) no entry {0x26EA, 0x26EA, prEmojiPresentation}, // E0.6 [1] (⛪) church {0x26F2, 0x26F3, prEmojiPresentation}, // E0.6 [2] (⛲..⛳) fountain..flag in hole {0x26F5, 0x26F5, prEmojiPresentation}, // E0.6 [1] (⛵) sailboat {0x26FA, 0x26FA, prEmojiPresentation}, // E0.6 [1] (⛺) tent {0x26FD, 0x26FD, prEmojiPresentation}, // E0.6 [1] (⛽) fuel pump {0x2705, 0x2705, prEmojiPresentation}, // E0.6 [1] (✅) check mark button {0x270A, 0x270B, 
prEmojiPresentation}, // E0.6 [2] (✊..✋) raised fist..raised hand {0x2728, 0x2728, prEmojiPresentation}, // E0.6 [1] (✨) sparkles {0x274C, 0x274C, prEmojiPresentation}, // E0.6 [1] (❌) cross mark {0x274E, 0x274E, prEmojiPresentation}, // E0.6 [1] (❎) cross mark button {0x2753, 0x2755, prEmojiPresentation}, // E0.6 [3] (❓..❕) red question mark..white exclamation mark {0x2757, 0x2757, prEmojiPresentation}, // E0.6 [1] (❗) red exclamation mark {0x2795, 0x2797, prEmojiPresentation}, // E0.6 [3] (➕..➗) plus..divide {0x27B0, 0x27B0, prEmojiPresentation}, // E0.6 [1] (➰) curly loop {0x27BF, 0x27BF, prEmojiPresentation}, // E1.0 [1] (➿) double curly loop {0x2B1B, 0x2B1C, prEmojiPresentation}, // E0.6 [2] (⬛..⬜) black large square..white large square {0x2B50, 0x2B50, prEmojiPresentation}, // E0.6 [1] (⭐) star {0x2B55, 0x2B55, prEmojiPresentation}, // E0.6 [1] (⭕) hollow red circle {0x1F004, 0x1F004, prEmojiPresentation}, // E0.6 [1] (🀄) mahjong red dragon {0x1F0CF, 0x1F0CF, prEmojiPresentation}, // E0.6 [1] (🃏) joker {0x1F18E, 0x1F18E, prEmojiPresentation}, // E0.6 [1] (🆎) AB button (blood type) {0x1F191, 0x1F19A, prEmojiPresentation}, // E0.6 [10] (🆑..🆚) CL button..VS button {0x1F1E6, 0x1F1FF, prEmojiPresentation}, // E0.0 [26] (🇦..🇿) regional indicator symbol letter a..regional indicator symbol letter z {0x1F201, 0x1F201, prEmojiPresentation}, // E0.6 [1] (🈁) Japanese “here” button {0x1F21A, 0x1F21A, prEmojiPresentation}, // E0.6 [1] (🈚) Japanese “free of charge” button {0x1F22F, 0x1F22F, prEmojiPresentation}, // E0.6 [1] (🈯) Japanese “reserved” button {0x1F232, 0x1F236, prEmojiPresentation}, // E0.6 [5] (🈲..🈶) Japanese “prohibited” button..Japanese “not free of charge” button {0x1F238, 0x1F23A, prEmojiPresentation}, // E0.6 [3] (🈸..🈺) Japanese “application” button..Japanese “open for business” button {0x1F250, 0x1F251, prEmojiPresentation}, // E0.6 [2] (🉐..🉑) Japanese “bargain” button..Japanese “acceptable” button {0x1F300, 0x1F30C, prEmojiPresentation}, // E0.6 [13] 
(🌀..🌌) cyclone..milky way {0x1F30D, 0x1F30E, prEmojiPresentation}, // E0.7 [2] (🌍..🌎) globe showing Europe-Africa..globe showing Americas {0x1F30F, 0x1F30F, prEmojiPresentation}, // E0.6 [1] (🌏) globe showing Asia-Australia {0x1F310, 0x1F310, prEmojiPresentation}, // E1.0 [1] (🌐) globe with meridians {0x1F311, 0x1F311, prEmojiPresentation}, // E0.6 [1] (🌑) new moon {0x1F312, 0x1F312, prEmojiPresentation}, // E1.0 [1] (🌒) waxing crescent moon {0x1F313, 0x1F315, prEmojiPresentation}, // E0.6 [3] (🌓..🌕) first quarter moon..full moon {0x1F316, 0x1F318, prEmojiPresentation}, // E1.0 [3] (🌖..🌘) waning gibbous moon..waning crescent moon {0x1F319, 0x1F319, prEmojiPresentation}, // E0.6 [1] (🌙) crescent moon {0x1F31A, 0x1F31A, prEmojiPresentation}, // E1.0 [1] (🌚) new moon face {0x1F31B, 0x1F31B, prEmojiPresentation}, // E0.6 [1] (🌛) first quarter moon face {0x1F31C, 0x1F31C, prEmojiPresentation}, // E0.7 [1] (🌜) last quarter moon face {0x1F31D, 0x1F31E, prEmojiPresentation}, // E1.0 [2] (🌝..🌞) full moon face..sun with face {0x1F31F, 0x1F320, prEmojiPresentation}, // E0.6 [2] (🌟..🌠) glowing star..shooting star {0x1F32D, 0x1F32F, prEmojiPresentation}, // E1.0 [3] (🌭..🌯) hot dog..burrito {0x1F330, 0x1F331, prEmojiPresentation}, // E0.6 [2] (🌰..🌱) chestnut..seedling {0x1F332, 0x1F333, prEmojiPresentation}, // E1.0 [2] (🌲..🌳) evergreen tree..deciduous tree {0x1F334, 0x1F335, prEmojiPresentation}, // E0.6 [2] (🌴..🌵) palm tree..cactus {0x1F337, 0x1F34A, prEmojiPresentation}, // E0.6 [20] (🌷..🍊) tulip..tangerine {0x1F34B, 0x1F34B, prEmojiPresentation}, // E1.0 [1] (🍋) lemon {0x1F34C, 0x1F34F, prEmojiPresentation}, // E0.6 [4] (🍌..🍏) banana..green apple {0x1F350, 0x1F350, prEmojiPresentation}, // E1.0 [1] (🍐) pear {0x1F351, 0x1F37B, prEmojiPresentation}, // E0.6 [43] (🍑..🍻) peach..clinking beer mugs {0x1F37C, 0x1F37C, prEmojiPresentation}, // E1.0 [1] (🍼) baby bottle {0x1F37E, 0x1F37F, prEmojiPresentation}, // E1.0 [2] (🍾..🍿) bottle with popping cork..popcorn {0x1F380, 0x1F393, 
prEmojiPresentation}, // E0.6 [20] (🎀..🎓) ribbon..graduation cap {0x1F3A0, 0x1F3C4, prEmojiPresentation}, // E0.6 [37] (🎠..🏄) carousel horse..person surfing {0x1F3C5, 0x1F3C5, prEmojiPresentation}, // E1.0 [1] (🏅) sports medal {0x1F3C6, 0x1F3C6, prEmojiPresentation}, // E0.6 [1] (🏆) trophy {0x1F3C7, 0x1F3C7, prEmojiPresentation}, // E1.0 [1] (🏇) horse racing {0x1F3C8, 0x1F3C8, prEmojiPresentation}, // E0.6 [1] (🏈) american football {0x1F3C9, 0x1F3C9, prEmojiPresentation}, // E1.0 [1] (🏉) rugby football {0x1F3CA, 0x1F3CA, prEmojiPresentation}, // E0.6 [1] (🏊) person swimming {0x1F3CF, 0x1F3D3, prEmojiPresentation}, // E1.0 [5] (🏏..🏓) cricket game..ping pong {0x1F3E0, 0x1F3E3, prEmojiPresentation}, // E0.6 [4] (🏠..🏣) house..Japanese post office {0x1F3E4, 0x1F3E4, prEmojiPresentation}, // E1.0 [1] (🏤) post office {0x1F3E5, 0x1F3F0, prEmojiPresentation}, // E0.6 [12] (🏥..🏰) hospital..castle {0x1F3F4, 0x1F3F4, prEmojiPresentation}, // E1.0 [1] (🏴) black flag {0x1F3F8, 0x1F407, prEmojiPresentation}, // E1.0 [16] (🏸..🐇) badminton..rabbit {0x1F408, 0x1F408, prEmojiPresentation}, // E0.7 [1] (🐈) cat {0x1F409, 0x1F40B, prEmojiPresentation}, // E1.0 [3] (🐉..🐋) dragon..whale {0x1F40C, 0x1F40E, prEmojiPresentation}, // E0.6 [3] (🐌..🐎) snail..horse {0x1F40F, 0x1F410, prEmojiPresentation}, // E1.0 [2] (🐏..🐐) ram..goat {0x1F411, 0x1F412, prEmojiPresentation}, // E0.6 [2] (🐑..🐒) ewe..monkey {0x1F413, 0x1F413, prEmojiPresentation}, // E1.0 [1] (🐓) rooster {0x1F414, 0x1F414, prEmojiPresentation}, // E0.6 [1] (🐔) chicken {0x1F415, 0x1F415, prEmojiPresentation}, // E0.7 [1] (🐕) dog {0x1F416, 0x1F416, prEmojiPresentation}, // E1.0 [1] (🐖) pig {0x1F417, 0x1F429, prEmojiPresentation}, // E0.6 [19] (🐗..🐩) boar..poodle {0x1F42A, 0x1F42A, prEmojiPresentation}, // E1.0 [1] (🐪) camel {0x1F42B, 0x1F43E, prEmojiPresentation}, // E0.6 [20] (🐫..🐾) two-hump camel..paw prints {0x1F440, 0x1F440, prEmojiPresentation}, // E0.6 [1] (👀) eyes {0x1F442, 0x1F464, prEmojiPresentation}, // E0.6 [35] (👂..👤) 
ear..bust in silhouette {0x1F465, 0x1F465, prEmojiPresentation}, // E1.0 [1] (👥) busts in silhouette {0x1F466, 0x1F46B, prEmojiPresentation}, // E0.6 [6] (👦..👫) boy..woman and man holding hands {0x1F46C, 0x1F46D, prEmojiPresentation}, // E1.0 [2] (👬..👭) men holding hands..women holding hands {0x1F46E, 0x1F4AC, prEmojiPresentation}, // E0.6 [63] (👮..💬) police officer..speech balloon {0x1F4AD, 0x1F4AD, prEmojiPresentation}, // E1.0 [1] (💭) thought balloon {0x1F4AE, 0x1F4B5, prEmojiPresentation}, // E0.6 [8] (💮..💵) white flower..dollar banknote {0x1F4B6, 0x1F4B7, prEmojiPresentation}, // E1.0 [2] (💶..💷) euro banknote..pound banknote {0x1F4B8, 0x1F4EB, prEmojiPresentation}, // E0.6 [52] (💸..📫) money with wings..closed mailbox with raised flag {0x1F4EC, 0x1F4ED, prEmojiPresentation}, // E0.7 [2] (📬..📭) open mailbox with raised flag..open mailbox with lowered flag {0x1F4EE, 0x1F4EE, prEmojiPresentation}, // E0.6 [1] (📮) postbox {0x1F4EF, 0x1F4EF, prEmojiPresentation}, // E1.0 [1] (📯) postal horn {0x1F4F0, 0x1F4F4, prEmojiPresentation}, // E0.6 [5] (📰..📴) newspaper..mobile phone off {0x1F4F5, 0x1F4F5, prEmojiPresentation}, // E1.0 [1] (📵) no mobile phones {0x1F4F6, 0x1F4F7, prEmojiPresentation}, // E0.6 [2] (📶..📷) antenna bars..camera {0x1F4F8, 0x1F4F8, prEmojiPresentation}, // E1.0 [1] (📸) camera with flash {0x1F4F9, 0x1F4FC, prEmojiPresentation}, // E0.6 [4] (📹..📼) video camera..videocassette {0x1F4FF, 0x1F502, prEmojiPresentation}, // E1.0 [4] (📿..🔂) prayer beads..repeat single button {0x1F503, 0x1F503, prEmojiPresentation}, // E0.6 [1] (🔃) clockwise vertical arrows {0x1F504, 0x1F507, prEmojiPresentation}, // E1.0 [4] (🔄..🔇) counterclockwise arrows button..muted speaker {0x1F508, 0x1F508, prEmojiPresentation}, // E0.7 [1] (🔈) speaker low volume {0x1F509, 0x1F509, prEmojiPresentation}, // E1.0 [1] (🔉) speaker medium volume {0x1F50A, 0x1F514, prEmojiPresentation}, // E0.6 [11] (🔊..🔔) speaker high volume..bell {0x1F515, 0x1F515, prEmojiPresentation}, // E1.0 [1] (🔕) bell 
with slash {0x1F516, 0x1F52B, prEmojiPresentation}, // E0.6 [22] (🔖..🔫) bookmark..water pistol {0x1F52C, 0x1F52D, prEmojiPresentation}, // E1.0 [2] (🔬..🔭) microscope..telescope {0x1F52E, 0x1F53D, prEmojiPresentation}, // E0.6 [16] (🔮..🔽) crystal ball..downwards button {0x1F54B, 0x1F54E, prEmojiPresentation}, // E1.0 [4] (🕋..🕎) kaaba..menorah {0x1F550, 0x1F55B, prEmojiPresentation}, // E0.6 [12] (🕐..🕛) one o’clock..twelve o’clock {0x1F55C, 0x1F567, prEmojiPresentation}, // E0.7 [12] (🕜..🕧) one-thirty..twelve-thirty {0x1F57A, 0x1F57A, prEmojiPresentation}, // E3.0 [1] (🕺) man dancing {0x1F595, 0x1F596, prEmojiPresentation}, // E1.0 [2] (🖕..🖖) middle finger..vulcan salute {0x1F5A4, 0x1F5A4, prEmojiPresentation}, // E3.0 [1] (🖤) black heart {0x1F5FB, 0x1F5FF, prEmojiPresentation}, // E0.6 [5] (🗻..🗿) mount fuji..moai {0x1F600, 0x1F600, prEmojiPresentation}, // E1.0 [1] (😀) grinning face {0x1F601, 0x1F606, prEmojiPresentation}, // E0.6 [6] (😁..😆) beaming face with smiling eyes..grinning squinting face {0x1F607, 0x1F608, prEmojiPresentation}, // E1.0 [2] (😇..😈) smiling face with halo..smiling face with horns {0x1F609, 0x1F60D, prEmojiPresentation}, // E0.6 [5] (😉..😍) winking face..smiling face with heart-eyes {0x1F60E, 0x1F60E, prEmojiPresentation}, // E1.0 [1] (😎) smiling face with sunglasses {0x1F60F, 0x1F60F, prEmojiPresentation}, // E0.6 [1] (😏) smirking face {0x1F610, 0x1F610, prEmojiPresentation}, // E0.7 [1] (😐) neutral face {0x1F611, 0x1F611, prEmojiPresentation}, // E1.0 [1] (😑) expressionless face {0x1F612, 0x1F614, prEmojiPresentation}, // E0.6 [3] (😒..😔) unamused face..pensive face {0x1F615, 0x1F615, prEmojiPresentation}, // E1.0 [1] (😕) confused face {0x1F616, 0x1F616, prEmojiPresentation}, // E0.6 [1] (😖) confounded face {0x1F617, 0x1F617, prEmojiPresentation}, // E1.0 [1] (😗) kissing face {0x1F618, 0x1F618, prEmojiPresentation}, // E0.6 [1] (😘) face blowing a kiss {0x1F619, 0x1F619, prEmojiPresentation}, // E1.0 [1] (😙) kissing face with smiling eyes 
{0x1F61A, 0x1F61A, prEmojiPresentation}, // E0.6 [1] (😚) kissing face with closed eyes {0x1F61B, 0x1F61B, prEmojiPresentation}, // E1.0 [1] (😛) face with tongue {0x1F61C, 0x1F61E, prEmojiPresentation}, // E0.6 [3] (😜..😞) winking face with tongue..disappointed face {0x1F61F, 0x1F61F, prEmojiPresentation}, // E1.0 [1] (😟) worried face {0x1F620, 0x1F625, prEmojiPresentation}, // E0.6 [6] (😠..😥) angry face..sad but relieved face {0x1F626, 0x1F627, prEmojiPresentation}, // E1.0 [2] (😦..😧) frowning face with open mouth..anguished face {0x1F628, 0x1F62B, prEmojiPresentation}, // E0.6 [4] (😨..😫) fearful face..tired face {0x1F62C, 0x1F62C, prEmojiPresentation}, // E1.0 [1] (😬) grimacing face {0x1F62D, 0x1F62D, prEmojiPresentation}, // E0.6 [1] (😭) loudly crying face {0x1F62E, 0x1F62F, prEmojiPresentation}, // E1.0 [2] (😮..😯) face with open mouth..hushed face {0x1F630, 0x1F633, prEmojiPresentation}, // E0.6 [4] (😰..😳) anxious face with sweat..flushed face {0x1F634, 0x1F634, prEmojiPresentation}, // E1.0 [1] (😴) sleeping face {0x1F635, 0x1F635, prEmojiPresentation}, // E0.6 [1] (😵) face with crossed-out eyes {0x1F636, 0x1F636, prEmojiPresentation}, // E1.0 [1] (😶) face without mouth {0x1F637, 0x1F640, prEmojiPresentation}, // E0.6 [10] (😷..🙀) face with medical mask..weary cat {0x1F641, 0x1F644, prEmojiPresentation}, // E1.0 [4] (🙁..🙄) slightly frowning face..face with rolling eyes {0x1F645, 0x1F64F, prEmojiPresentation}, // E0.6 [11] (🙅..🙏) person gesturing NO..folded hands {0x1F680, 0x1F680, prEmojiPresentation}, // E0.6 [1] (🚀) rocket {0x1F681, 0x1F682, prEmojiPresentation}, // E1.0 [2] (🚁..🚂) helicopter..locomotive {0x1F683, 0x1F685, prEmojiPresentation}, // E0.6 [3] (🚃..🚅) railway car..bullet train {0x1F686, 0x1F686, prEmojiPresentation}, // E1.0 [1] (🚆) train {0x1F687, 0x1F687, prEmojiPresentation}, // E0.6 [1] (🚇) metro {0x1F688, 0x1F688, prEmojiPresentation}, // E1.0 [1] (🚈) light rail {0x1F689, 0x1F689, prEmojiPresentation}, // E0.6 [1] (🚉) station {0x1F68A, 0x1F68B, 
prEmojiPresentation}, // E1.0 [2] (🚊..🚋) tram..tram car {0x1F68C, 0x1F68C, prEmojiPresentation}, // E0.6 [1] (🚌) bus {0x1F68D, 0x1F68D, prEmojiPresentation}, // E0.7 [1] (🚍) oncoming bus {0x1F68E, 0x1F68E, prEmojiPresentation}, // E1.0 [1] (🚎) trolleybus {0x1F68F, 0x1F68F, prEmojiPresentation}, // E0.6 [1] (🚏) bus stop {0x1F690, 0x1F690, prEmojiPresentation}, // E1.0 [1] (🚐) minibus {0x1F691, 0x1F693, prEmojiPresentation}, // E0.6 [3] (🚑..🚓) ambulance..police car {0x1F694, 0x1F694, prEmojiPresentation}, // E0.7 [1] (🚔) oncoming police car {0x1F695, 0x1F695, prEmojiPresentation}, // E0.6 [1] (🚕) taxi {0x1F696, 0x1F696, prEmojiPresentation}, // E1.0 [1] (🚖) oncoming taxi {0x1F697, 0x1F697, prEmojiPresentation}, // E0.6 [1] (🚗) automobile {0x1F698, 0x1F698, prEmojiPresentation}, // E0.7 [1] (🚘) oncoming automobile {0x1F699, 0x1F69A, prEmojiPresentation}, // E0.6 [2] (🚙..🚚) sport utility vehicle..delivery truck {0x1F69B, 0x1F6A1, prEmojiPresentation}, // E1.0 [7] (🚛..🚡) articulated lorry..aerial tramway {0x1F6A2, 0x1F6A2, prEmojiPresentation}, // E0.6 [1] (🚢) ship {0x1F6A3, 0x1F6A3, prEmojiPresentation}, // E1.0 [1] (🚣) person rowing boat {0x1F6A4, 0x1F6A5, prEmojiPresentation}, // E0.6 [2] (🚤..🚥) speedboat..horizontal traffic light {0x1F6A6, 0x1F6A6, prEmojiPresentation}, // E1.0 [1] (🚦) vertical traffic light {0x1F6A7, 0x1F6AD, prEmojiPresentation}, // E0.6 [7] (🚧..🚭) construction..no smoking {0x1F6AE, 0x1F6B1, prEmojiPresentation}, // E1.0 [4] (🚮..🚱) litter in bin sign..non-potable water {0x1F6B2, 0x1F6B2, prEmojiPresentation}, // E0.6 [1] (🚲) bicycle {0x1F6B3, 0x1F6B5, prEmojiPresentation}, // E1.0 [3] (🚳..🚵) no bicycles..person mountain biking {0x1F6B6, 0x1F6B6, prEmojiPresentation}, // E0.6 [1] (🚶) person walking {0x1F6B7, 0x1F6B8, prEmojiPresentation}, // E1.0 [2] (🚷..🚸) no pedestrians..children crossing {0x1F6B9, 0x1F6BE, prEmojiPresentation}, // E0.6 [6] (🚹..🚾) men’s room..water closet {0x1F6BF, 0x1F6BF, prEmojiPresentation}, // E1.0 [1] (🚿) shower {0x1F6C0, 
0x1F6C0, prEmojiPresentation}, // E0.6 [1] (🛀) person taking bath {0x1F6C1, 0x1F6C5, prEmojiPresentation}, // E1.0 [5] (🛁..🛅) bathtub..left luggage {0x1F6CC, 0x1F6CC, prEmojiPresentation}, // E1.0 [1] (🛌) person in bed {0x1F6D0, 0x1F6D0, prEmojiPresentation}, // E1.0 [1] (🛐) place of worship {0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart {0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator {0x1F6DC, 0x1F6DC, prEmojiPresentation}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival {0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe {0x1F6F7, 0x1F6F8, prEmojiPresentation}, // E5.0 [2] (🛷..🛸) sled..flying saucer {0x1F6F9, 0x1F6F9, prEmojiPresentation}, // E11.0 [1] (🛹) skateboard {0x1F6FA, 0x1F6FA, prEmojiPresentation}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prEmojiPresentation}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F7E0, 0x1F7EB, prEmojiPresentation}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7F0, 0x1F7F0, prEmojiPresentation}, // E14.0 [1] (🟰) heavy equals sign {0x1F90C, 0x1F90C, prEmojiPresentation}, // E13.0 [1] (🤌) pinched fingers {0x1F90D, 0x1F90F, prEmojiPresentation}, // E12.0 [3] (🤍..🤏) white heart..pinching hand {0x1F910, 0x1F918, prEmojiPresentation}, // E1.0 [9] (🤐..🤘) zipper-mouth face..sign of the horns {0x1F919, 0x1F91E, prEmojiPresentation}, // E3.0 [6] (🤙..🤞) call me hand..crossed fingers {0x1F91F, 0x1F91F, prEmojiPresentation}, // E5.0 [1] (🤟) love-you gesture {0x1F920, 0x1F927, prEmojiPresentation}, // E3.0 [8] (🤠..🤧) cowboy hat face..sneezing face {0x1F928, 0x1F92F, prEmojiPresentation}, // E5.0 [8] (🤨..🤯) face with raised eyebrow..exploding head {0x1F930, 0x1F930, prEmojiPresentation}, // E3.0 [1] 
(🤰) pregnant woman {0x1F931, 0x1F932, prEmojiPresentation}, // E5.0 [2] (🤱..🤲) breast-feeding..palms up together {0x1F933, 0x1F93A, prEmojiPresentation}, // E3.0 [8] (🤳..🤺) selfie..person fencing {0x1F93C, 0x1F93E, prEmojiPresentation}, // E3.0 [3] (🤼..🤾) people wrestling..person playing handball {0x1F93F, 0x1F93F, prEmojiPresentation}, // E12.0 [1] (🤿) diving mask {0x1F940, 0x1F945, prEmojiPresentation}, // E3.0 [6] (🥀..🥅) wilted flower..goal net {0x1F947, 0x1F94B, prEmojiPresentation}, // E3.0 [5] (🥇..🥋) 1st place medal..martial arts uniform {0x1F94C, 0x1F94C, prEmojiPresentation}, // E5.0 [1] (🥌) curling stone {0x1F94D, 0x1F94F, prEmojiPresentation}, // E11.0 [3] (🥍..🥏) lacrosse..flying disc {0x1F950, 0x1F95E, prEmojiPresentation}, // E3.0 [15] (🥐..🥞) croissant..pancakes {0x1F95F, 0x1F96B, prEmojiPresentation}, // E5.0 [13] (🥟..🥫) dumpling..canned food {0x1F96C, 0x1F970, prEmojiPresentation}, // E11.0 [5] (🥬..🥰) leafy green..smiling face with hearts {0x1F971, 0x1F971, prEmojiPresentation}, // E12.0 [1] (🥱) yawning face {0x1F972, 0x1F972, prEmojiPresentation}, // E13.0 [1] (🥲) smiling face with tear {0x1F973, 0x1F976, prEmojiPresentation}, // E11.0 [4] (🥳..🥶) partying face..cold face {0x1F977, 0x1F978, prEmojiPresentation}, // E13.0 [2] (🥷..🥸) ninja..disguised face {0x1F979, 0x1F979, prEmojiPresentation}, // E14.0 [1] (🥹) face holding back tears {0x1F97A, 0x1F97A, prEmojiPresentation}, // E11.0 [1] (🥺) pleading face {0x1F97B, 0x1F97B, prEmojiPresentation}, // E12.0 [1] (🥻) sari {0x1F97C, 0x1F97F, prEmojiPresentation}, // E11.0 [4] (🥼..🥿) lab coat..flat shoe {0x1F980, 0x1F984, prEmojiPresentation}, // E1.0 [5] (🦀..🦄) crab..unicorn {0x1F985, 0x1F991, prEmojiPresentation}, // E3.0 [13] (🦅..🦑) eagle..squid {0x1F992, 0x1F997, prEmojiPresentation}, // E5.0 [6] (🦒..🦗) giraffe..cricket {0x1F998, 0x1F9A2, prEmojiPresentation}, // E11.0 [11] (🦘..🦢) kangaroo..swan {0x1F9A3, 0x1F9A4, prEmojiPresentation}, // E13.0 [2] (🦣..🦤) mammoth..dodo {0x1F9A5, 0x1F9AA, 
prEmojiPresentation}, // E12.0 [6] (🦥..🦪) sloth..oyster {0x1F9AB, 0x1F9AD, prEmojiPresentation}, // E13.0 [3] (🦫..🦭) beaver..seal {0x1F9AE, 0x1F9AF, prEmojiPresentation}, // E12.0 [2] (🦮..🦯) guide dog..white cane {0x1F9B0, 0x1F9B9, prEmojiPresentation}, // E11.0 [10] (🦰..🦹) red hair..supervillain {0x1F9BA, 0x1F9BF, prEmojiPresentation}, // E12.0 [6] (🦺..🦿) safety vest..mechanical leg {0x1F9C0, 0x1F9C0, prEmojiPresentation}, // E1.0 [1] (🧀) cheese wedge {0x1F9C1, 0x1F9C2, prEmojiPresentation}, // E11.0 [2] (🧁..🧂) cupcake..salt {0x1F9C3, 0x1F9CA, prEmojiPresentation}, // E12.0 [8] (🧃..🧊) beverage box..ice {0x1F9CB, 0x1F9CB, prEmojiPresentation}, // E13.0 [1] (🧋) bubble tea {0x1F9CC, 0x1F9CC, prEmojiPresentation}, // E14.0 [1] (🧌) troll {0x1F9CD, 0x1F9CF, prEmojiPresentation}, // E12.0 [3] (🧍..🧏) person standing..deaf person {0x1F9D0, 0x1F9E6, prEmojiPresentation}, // E5.0 [23] (🧐..🧦) face with monocle..socks {0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet {0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal {0x1FA75, 0x1FA77, prEmojiPresentation}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls {0x1FA87, 0x1FA88, prEmojiPresentation}, // E15.0 [2] (🪇..🪈) maracas..flute {0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa {0x1FAAD, 0x1FAAF, prEmojiPresentation}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 
0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs {0x1FABB, 0x1FABD, prEmojiPresentation}, // E15.0 [3] (🪻..🪽) hyacinth..wing {0x1FABF, 0x1FABF, prEmojiPresentation}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown {0x1FACE, 0x1FACF, prEmojiPresentation}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring liquid..jar {0x1FADA, 0x1FADB, prEmojiPresentation}, // E15.0 [2] (🫚..🫛) ginger root..pea pod {0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles {0x1FAE8, 0x1FAE8, prEmojiPresentation}, // E15.0 [1] (🫨) shaking face {0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands {0x1FAF7, 0x1FAF8, prEmojiPresentation}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/linerules.go
vendor/github.com/rivo/uniseg/linerules.go
package uniseg

import "unicode/utf8"

// The states of the line break parser. Compound names encode the sequence of
// properties seen so far (e.g. lbNUSY = a number followed by SY), which is how
// the parser remembers enough context for the multi-character LB rules.
const (
	lbAny = iota
	lbBK
	lbCR
	lbLF
	lbNL
	lbSP
	lbZW
	lbWJ
	lbGL
	lbBA
	lbHY
	lbCL
	lbCP
	lbEX
	lbIS
	lbSY
	lbOP
	lbQU
	lbQUSP
	lbNS
	lbCLCPSP
	lbB2
	lbB2SP
	lbCB
	lbBB
	lbLB21a
	lbHL
	lbAL
	lbNU
	lbPR
	lbEB
	lbIDEM
	lbNUNU
	lbNUSY
	lbNUIS
	lbNUCL
	lbNUCP
	lbPO
	lbJL
	lbJV
	lbJT
	lbH2
	lbH3
	lbOddRI
	lbEvenRI
	lbExtPicCn

	// Flag bits OR'ed into the state by transitionLineBreakState; they are
	// stripped off again on entry before the state is used for table lookups.
	lbZWJBit     = 64
	lbCPeaFWHBit = 128
)

// These constants define whether a given text may be broken into the next line.
// If the break is optional (LineCanBreak), you may choose to break or not based
// on your own criteria, for example, if the text has reached the available
// width.
const (
	LineDontBreak = iota // You may not break the line here.
	LineCanBreak         // You may or may not break the line here.
	LineMustBreak        // You must break the line here.
)

// lbTransitions implements the line break parser's state transitions. It's
// analogous to [grTransitions], see comments there for details.
//
// The returned rule number is the UAX #14 rule number times ten (e.g. 50 for
// LB5, 211 for LB21a); 310 marks the catch-all LB31. Lower numbers take
// precedence when transitionLineBreakState mixes two partial matches.
//
// Unicode version 15.0.0.
func lbTransitions(state, prop int) (newState, lineBreak, rule int) {
	switch uint64(state) | uint64(prop)<<32 {
	// LB4: mandatory break after BK.
	case lbBK | prAny<<32:
		return lbAny, LineMustBreak, 40

	// LB5: CR LF stay together; break after other hard line breaks.
	case lbCR | prLF<<32:
		return lbLF, LineDontBreak, 50
	case lbCR | prAny<<32:
		return lbAny, LineMustBreak, 50
	case lbLF | prAny<<32:
		return lbAny, LineMustBreak, 50
	case lbNL | prAny<<32:
		return lbAny, LineMustBreak, 50

	// LB6: do not break before hard line breaks.
	case lbAny | prBK<<32:
		return lbBK, LineDontBreak, 60
	case lbAny | prCR<<32:
		return lbCR, LineDontBreak, 60
	case lbAny | prLF<<32:
		return lbLF, LineDontBreak, 60
	case lbAny | prNL<<32:
		return lbNL, LineDontBreak, 60

	// LB7: do not break before spaces or zero width space.
	case lbAny | prSP<<32:
		return lbSP, LineDontBreak, 70
	case lbAny | prZW<<32:
		return lbZW, LineDontBreak, 70

	// LB8: break before any character following a ZW, even if spaces intervene.
	case lbZW | prSP<<32:
		return lbZW, LineDontBreak, 70
	case lbZW | prAny<<32:
		return lbAny, LineCanBreak, 80

	// LB11: do not break before or after Word joiner.
	case lbAny | prWJ<<32:
		return lbWJ, LineDontBreak, 110
	case lbWJ | prAny<<32:
		return lbAny, LineDontBreak, 110

	// LB12: do not break after NBSP and related characters.
	case lbAny | prGL<<32:
		return lbGL, LineCanBreak, 310
	case lbGL | prAny<<32:
		return lbAny, LineDontBreak, 120

	// LB13 (simple transitions; the state-dependent part is patched in
	// transitionLineBreakState).
	case lbAny | prCL<<32:
		return lbCL, LineCanBreak, 310
	case lbAny | prCP<<32:
		return lbCP, LineCanBreak, 310
	case lbAny | prEX<<32:
		return lbEX, LineDontBreak, 130
	case lbAny | prIS<<32:
		return lbIS, LineCanBreak, 310
	case lbAny | prSY<<32:
		return lbSY, LineCanBreak, 310

	// LB14: do not break after OP, even after spaces.
	case lbAny | prOP<<32:
		return lbOP, LineCanBreak, 310
	case lbOP | prSP<<32:
		return lbOP, LineDontBreak, 70
	case lbOP | prAny<<32:
		return lbAny, LineDontBreak, 140

	// LB15: do not break within QU SP* OP.
	case lbQU | prSP<<32:
		return lbQUSP, LineDontBreak, 70
	case lbQU | prOP<<32:
		return lbOP, LineDontBreak, 150
	case lbQUSP | prOP<<32:
		return lbOP, LineDontBreak, 150

	// LB16: do not break within (CL|CP) SP* NS.
	case lbCL | prSP<<32:
		return lbCLCPSP, LineDontBreak, 70
	case lbNUCL | prSP<<32:
		return lbCLCPSP, LineDontBreak, 70
	case lbCP | prSP<<32:
		return lbCLCPSP, LineDontBreak, 70
	case lbNUCP | prSP<<32:
		return lbCLCPSP, LineDontBreak, 70
	case lbCL | prNS<<32:
		return lbNS, LineDontBreak, 160
	case lbNUCL | prNS<<32:
		return lbNS, LineDontBreak, 160
	case lbCP | prNS<<32:
		return lbNS, LineDontBreak, 160
	case lbNUCP | prNS<<32:
		return lbNS, LineDontBreak, 160
	case lbCLCPSP | prNS<<32:
		return lbNS, LineDontBreak, 160

	// LB17: do not break within B2 SP* B2.
	case lbAny | prB2<<32:
		return lbB2, LineCanBreak, 310
	case lbB2 | prSP<<32:
		return lbB2SP, LineDontBreak, 70
	case lbB2 | prB2<<32:
		return lbB2, LineDontBreak, 170
	case lbB2SP | prB2<<32:
		return lbB2, LineDontBreak, 170

	// LB18: break after spaces.
	case lbSP | prAny<<32:
		return lbAny, LineCanBreak, 180
	case lbQUSP | prAny<<32:
		return lbAny, LineCanBreak, 180
	case lbCLCPSP | prAny<<32:
		return lbAny, LineCanBreak, 180
	case lbB2SP | prAny<<32:
		return lbAny, LineCanBreak, 180

	// LB19: do not break before or after quotation marks.
	case lbAny | prQU<<32:
		return lbQU, LineDontBreak, 190
	case lbQU | prAny<<32:
		return lbAny, LineDontBreak, 190

	// LB20: break before and after unresolved CB.
	case lbAny | prCB<<32:
		return lbCB, LineCanBreak, 200
	case lbCB | prAny<<32:
		return lbAny, LineCanBreak, 200

	// LB21: do not break before BA/HY/NS; do not break after BB.
	case lbAny | prBA<<32:
		return lbBA, LineDontBreak, 210
	case lbAny | prHY<<32:
		return lbHY, LineDontBreak, 210
	case lbAny | prNS<<32:
		return lbNS, LineDontBreak, 210
	case lbAny | prBB<<32:
		return lbBB, LineCanBreak, 310
	case lbBB | prAny<<32:
		return lbAny, LineDontBreak, 210

	// LB21a: do not break after HL (HY|BA).
	case lbAny | prHL<<32:
		return lbHL, LineCanBreak, 310
	case lbHL | prHY<<32:
		return lbLB21a, LineDontBreak, 210
	case lbHL | prBA<<32:
		return lbLB21a, LineDontBreak, 210
	case lbLB21a | prAny<<32:
		return lbAny, LineDontBreak, 211

	// LB21b: do not break between SY and HL.
	case lbSY | prHL<<32:
		return lbHL, LineDontBreak, 212
	case lbNUSY | prHL<<32:
		return lbHL, LineDontBreak, 212

	// LB22: do not break before ellipses.
	case lbAny | prIN<<32:
		return lbAny, LineDontBreak, 220

	// LB23: do not break between digits and letters.
	case lbAny | prAL<<32:
		return lbAL, LineCanBreak, 310
	case lbAny | prNU<<32:
		return lbNU, LineCanBreak, 310
	case lbAL | prNU<<32:
		return lbNU, LineDontBreak, 230
	case lbHL | prNU<<32:
		return lbNU, LineDontBreak, 230
	case lbNU | prAL<<32:
		return lbAL, LineDontBreak, 230
	case lbNU | prHL<<32:
		return lbHL, LineDontBreak, 230
	case lbNUNU | prAL<<32:
		return lbAL, LineDontBreak, 230
	case lbNUNU | prHL<<32:
		return lbHL, LineDontBreak, 230

	// LB23a: do not break between numeric prefixes and ideographs/emoji, or
	// between ideographs/emoji and numeric postfixes.
	case lbAny | prPR<<32:
		return lbPR, LineCanBreak, 310
	case lbAny | prID<<32:
		return lbIDEM, LineCanBreak, 310
	case lbAny | prEB<<32:
		return lbEB, LineCanBreak, 310
	case lbAny | prEM<<32:
		return lbIDEM, LineCanBreak, 310
	case lbPR | prID<<32:
		return lbIDEM, LineDontBreak, 231
	case lbPR | prEB<<32:
		return lbEB, LineDontBreak, 231
	case lbPR | prEM<<32:
		return lbIDEM, LineDontBreak, 231
	case lbIDEM | prPO<<32:
		return lbPO, LineDontBreak, 231
	case lbEB | prPO<<32:
		return lbPO, LineDontBreak, 231

	// LB24: do not break between numeric prefix/postfix and letters.
	case lbAny | prPO<<32:
		return lbPO, LineCanBreak, 310
	case lbPR | prAL<<32:
		return lbAL, LineDontBreak, 240
	case lbPR | prHL<<32:
		return lbHL, LineDontBreak, 240
	case lbPO | prAL<<32:
		return lbAL, LineDontBreak, 240
	case lbPO | prHL<<32:
		return lbHL, LineDontBreak, 240
	case lbAL | prPR<<32:
		return lbPR, LineDontBreak, 240
	case lbAL | prPO<<32:
		return lbPO, LineDontBreak, 240
	case lbHL | prPR<<32:
		return lbPR, LineDontBreak, 240
	case lbHL | prPO<<32:
		return lbPO, LineDontBreak, 240

	// LB25 (simple transitions; the look-ahead part is handled in
	// transitionLineBreakState).
	case lbPR | prNU<<32:
		return lbNU, LineDontBreak, 250
	case lbPO | prNU<<32:
		return lbNU, LineDontBreak, 250
	case lbOP | prNU<<32:
		return lbNU, LineDontBreak, 250
	case lbHY | prNU<<32:
		return lbNU, LineDontBreak, 250
	case lbNU | prNU<<32:
		return lbNUNU, LineDontBreak, 250
	case lbNU | prSY<<32:
		return lbNUSY, LineDontBreak, 250
	case lbNU | prIS<<32:
		return lbNUIS, LineDontBreak, 250
	case lbNUNU | prNU<<32:
		return lbNUNU, LineDontBreak, 250
	case lbNUNU | prSY<<32:
		return lbNUSY, LineDontBreak, 250
	case lbNUNU | prIS<<32:
		return lbNUIS, LineDontBreak, 250
	case lbNUSY | prNU<<32:
		return lbNUNU, LineDontBreak, 250
	case lbNUSY | prSY<<32:
		return lbNUSY, LineDontBreak, 250
	case lbNUSY | prIS<<32:
		return lbNUIS, LineDontBreak, 250
	case lbNUIS | prNU<<32:
		return lbNUNU, LineDontBreak, 250
	case lbNUIS | prSY<<32:
		return lbNUSY, LineDontBreak, 250
	case lbNUIS | prIS<<32:
		return lbNUIS, LineDontBreak, 250
	case lbNU | prCL<<32:
		return lbNUCL, LineDontBreak, 250
	case lbNU | prCP<<32:
		return lbNUCP, LineDontBreak, 250
	case lbNUNU | prCL<<32:
		return lbNUCL, LineDontBreak, 250
	case lbNUNU | prCP<<32:
		return lbNUCP, LineDontBreak, 250
	case lbNUSY | prCL<<32:
		return lbNUCL, LineDontBreak, 250
	case lbNUSY | prCP<<32:
		return lbNUCP, LineDontBreak, 250
	case lbNUIS | prCL<<32:
		return lbNUCL, LineDontBreak, 250
	case lbNUIS | prCP<<32:
		return lbNUCP, LineDontBreak, 250
	case lbNU | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNUNU | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNUSY | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNUIS | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNUCL | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNUCP | prPO<<32:
		return lbPO, LineDontBreak, 250
	case lbNU | prPR<<32:
		return lbPR, LineDontBreak, 250
	case lbNUNU | prPR<<32:
		return lbPR, LineDontBreak, 250
	case lbNUSY | prPR<<32:
		return lbPR, LineDontBreak, 250
	case lbNUIS | prPR<<32:
		return lbPR, LineDontBreak, 250
	case lbNUCL | prPR<<32:
		return lbPR, LineDontBreak, 250
	case lbNUCP | prPR<<32:
		return lbPR, LineDontBreak, 250

	// LB26: do not break a Korean syllable.
	case lbAny | prJL<<32:
		return lbJL, LineCanBreak, 310
	case lbAny | prJV<<32:
		return lbJV, LineCanBreak, 310
	case lbAny | prJT<<32:
		return lbJT, LineCanBreak, 310
	case lbAny | prH2<<32:
		return lbH2, LineCanBreak, 310
	case lbAny | prH3<<32:
		return lbH3, LineCanBreak, 310
	case lbJL | prJL<<32:
		return lbJL, LineDontBreak, 260
	case lbJL | prJV<<32:
		return lbJV, LineDontBreak, 260
	case lbJL | prH2<<32:
		return lbH2, LineDontBreak, 260
	case lbJL | prH3<<32:
		return lbH3, LineDontBreak, 260
	case lbJV | prJV<<32:
		return lbJV, LineDontBreak, 260
	case lbJV | prJT<<32:
		return lbJT, LineDontBreak, 260
	case lbH2 | prJV<<32:
		return lbJV, LineDontBreak, 260
	case lbH2 | prJT<<32:
		return lbJT, LineDontBreak, 260
	case lbJT | prJT<<32:
		return lbJT, LineDontBreak, 260
	case lbH3 | prJT<<32:
		return lbJT, LineDontBreak, 260

	// LB27: treat a Korean syllable block the same as ID.
	case lbJL | prPO<<32:
		return lbPO, LineDontBreak, 270
	case lbJV | prPO<<32:
		return lbPO, LineDontBreak, 270
	case lbJT | prPO<<32:
		return lbPO, LineDontBreak, 270
	case lbH2 | prPO<<32:
		return lbPO, LineDontBreak, 270
	case lbH3 | prPO<<32:
		return lbPO, LineDontBreak, 270
	case lbPR | prJL<<32:
		return lbJL, LineDontBreak, 270
	case lbPR | prJV<<32:
		return lbJV, LineDontBreak, 270
	case lbPR | prJT<<32:
		return lbJT, LineDontBreak, 270
	case lbPR | prH2<<32:
		return lbH2, LineDontBreak, 270
	case lbPR | prH3<<32:
		return lbH3, LineDontBreak, 270

	// LB28: do not break between alphabetics.
	case lbAL | prAL<<32:
		return lbAL, LineDontBreak, 280
	case lbAL | prHL<<32:
		return lbHL, LineDontBreak, 280
	case lbHL | prAL<<32:
		return lbAL, LineDontBreak, 280
	case lbHL | prHL<<32:
		return lbHL, LineDontBreak, 280

	// LB29: do not break between numeric punctuation and alphabetics.
	case lbIS | prAL<<32:
		return lbAL, LineDontBreak, 290
	case lbIS | prHL<<32:
		return lbHL, LineDontBreak, 290
	case lbNUIS | prAL<<32:
		return lbAL, LineDontBreak, 290
	case lbNUIS | prHL<<32:
		return lbHL, LineDontBreak, 290
	default:
		// No entry for this (state, property) pair.
		return -1, -1, -1
	}
}

// transitionLineBreakState determines the new state of the line break parser
// given the current state and the next code point. It also returns the type of
// line break: LineDontBreak, LineCanBreak, or LineMustBreak. If more than one
// code point is needed to determine the new state, the byte slice or the string
// starting after rune "r" can be used (whichever is not nil or empty) for
// further lookups.
func transitionLineBreakState(state int, r rune, b []byte, str string) (newState int, lineBreak int) {
	// Determine the property of the next character.
	nextProperty, generalCategory := propertyLineBreak(r)

	// Prepare: strip the flag bits off the state before table lookups.
	var forceNoBreak, isCPeaFWH bool
	if state >= 0 && state&lbCPeaFWHBit != 0 {
		isCPeaFWH = true // LB30: CP but ea is not F, W, or H.
		state = state &^ lbCPeaFWHBit
	}
	if state >= 0 && state&lbZWJBit != 0 {
		state = state &^ lbZWJBit // Extract zero-width joiner bit.
		forceNoBreak = true       // LB8a.
	}

	defer func() {
		// Transition into LB30: tag CP states whose East Asian width is not
		// F, W, or H so the look-back in "LB30 (part one)" can find them.
		if newState == lbCP || newState == lbNUCP {
			ea := propertyEastAsianWidth(r)
			if ea != prF && ea != prW && ea != prH {
				newState |= lbCPeaFWHBit
			}
		}

		// Override break: LB8a forbids a break after ZWJ regardless of the
		// rule that fired.
		if forceNoBreak {
			lineBreak = LineDontBreak
		}
	}()

	// LB1: resolve AI, SG, XX, SA, and CJ to concrete classes.
	if nextProperty == prAI || nextProperty == prSG || nextProperty == prXX {
		nextProperty = prAL
	} else if nextProperty == prSA {
		if generalCategory == gcMn || generalCategory == gcMc {
			nextProperty = prCM
		} else {
			nextProperty = prAL
		}
	} else if nextProperty == prCJ {
		nextProperty = prNS
	}

	// Combining marks (LB9/LB10) are handled before the transition table.
	if nextProperty == prZWJ || nextProperty == prCM {
		var bit int
		if nextProperty == prZWJ {
			bit = lbZWJBit
		}
		mustBreakState := state < 0 || state == lbBK || state == lbCR || state == lbLF || state == lbNL
		if !mustBreakState && state != lbSP && state != lbZW && state != lbQUSP && state != lbCLCPSP && state != lbB2SP {
			// LB9: the mark is transparent; keep the current state.
			return state | bit, LineDontBreak
		} else {
			// LB10: treat the mark as AL.
			if mustBreakState {
				return lbAL | bit, LineMustBreak
			}
			return lbAL | bit, LineCanBreak
		}
	}

	// Find the applicable transition in the table.
	var rule int
	newState, lineBreak, rule = lbTransitions(state, nextProperty)
	if newState < 0 {
		// No specific transition found. Try the less specific ones.
		anyPropProp, anyPropLineBreak, anyPropRule := lbTransitions(state, prAny)
		anyStateProp, anyStateLineBreak, anyStateRule := lbTransitions(lbAny, nextProperty)
		if anyPropProp >= 0 && anyStateProp >= 0 {
			// Both apply. We'll use a mix (see comments for grTransitions):
			// state from the property match, break decision from the
			// lower-numbered (higher-priority) rule.
			newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule
			if anyPropRule < anyStateRule {
				lineBreak, rule = anyPropLineBreak, anyPropRule
			}
		} else if anyPropProp >= 0 {
			// We only have a specific state.
			newState, lineBreak, rule = anyPropProp, anyPropLineBreak, anyPropRule
			// This branch will probably never be reached because a wildcard
			// property match always exists in the current transition map. But
			// we keep it here for future modifications to the transition map
			// where this may not be true anymore.
		} else if anyStateProp >= 0 {
			// We only have a specific property.
			newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule
		} else {
			// No known transition. LB31: ALL ÷ ALL.
			newState, lineBreak, rule = lbAny, LineCanBreak, 310
		}
	}

	// LB12a: no break before GL except after SP, BA, or HY.
	if rule > 121 && nextProperty == prGL && (state != lbSP && state != lbBA && state != lbHY && state != lbLB21a && state != lbQUSP && state != lbCLCPSP && state != lbB2SP) {
		return lbGL, LineDontBreak
	}

	// LB13: no break before closing punctuation (unless a number ran into it,
	// which LB25 handles via the lbNU/lbNUNU states).
	if rule > 130 && state != lbNU && state != lbNUNU {
		switch nextProperty {
		case prCL:
			return lbCL, LineDontBreak
		case prCP:
			return lbCP, LineDontBreak
		case prIS:
			return lbIS, LineDontBreak
		case prSY:
			return lbSY, LineDontBreak
		}
	}

	// LB25 (look ahead): peek one rune past "r" to see if a number follows.
	// NOTE(review): by Go operator precedence this condition parses as
	// ((rule > 250 && (PR|PO) && next == OP) || next == HY), i.e. the
	// look-ahead also applies to HY in any state — presumably matching the
	// tailored LB25 regex; confirm against upstream before changing.
	if rule > 250 &&
		(state == lbPR || state == lbPO) &&
		nextProperty == prOP || nextProperty == prHY {
		var r rune
		if b != nil { // Byte slice version.
			r, _ = utf8.DecodeRune(b)
		} else { // String version.
			r, _ = utf8.DecodeRuneInString(str)
		}
		if r != utf8.RuneError {
			pr, _ := propertyLineBreak(r)
			if pr == prNU {
				return lbNU, LineDontBreak
			}
		}
	}

	// LB30 (part one): no break between letters/numbers and a non-wide OP,
	// or between a non-wide CP (flagged via lbCPeaFWHBit) and letters/numbers.
	if rule > 300 {
		if (state == lbAL || state == lbHL || state == lbNU || state == lbNUNU) && nextProperty == prOP {
			ea := propertyEastAsianWidth(r)
			if ea != prF && ea != prW && ea != prH {
				return lbOP, LineDontBreak
			}
		} else if isCPeaFWH {
			switch nextProperty {
			case prAL:
				return lbAL, LineDontBreak
			case prHL:
				return lbHL, LineDontBreak
			case prNU:
				return lbNU, LineDontBreak
			}
		}
	}

	// LB30a: allow breaks only between pairs of Regional Indicators.
	if newState == lbAny && nextProperty == prRI {
		if state != lbOddRI && state != lbEvenRI { // Includes state == -1.
			// Transition into the first RI.
			return lbOddRI, lineBreak
		}
		if state == lbOddRI {
			// Don't break pairs of Regional Indicators.
			return lbEvenRI, LineDontBreak
		}
		return lbOddRI, lineBreak
	}

	// LB30b: no break between an emoji base (or unassigned extended
	// pictographic) and an emoji modifier.
	if rule > 302 {
		if nextProperty == prEM {
			if state == lbEB || state == lbExtPicCn {
				// NOTE(review): returns prAny as a state; presumably
				// prAny == lbAny (both first in their iota blocks) — verify.
				return prAny, LineDontBreak
			}
		}
		graphemeProperty := propertyGraphemes(r)
		if graphemeProperty == prExtendedPictographic && generalCategory == gcCn {
			return lbExtPicCn, LineCanBreak
		}
	}

	return
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/word.go
vendor/github.com/rivo/uniseg/word.go
package uniseg

import "unicode/utf8"

// FirstWord returns the first word found in the given byte slice according to
// the rules of [Unicode Standard Annex #29, Word Boundaries]. This function can
// be called continuously to extract all words from a byte slice, as illustrated
// in the example below.
//
// If you don't know the current state, for example when calling the function
// for the first time, you must pass -1. For consecutive calls, pass the state
// and rest slice returned by the previous call.
//
// The "rest" slice is the sub-slice of the original byte slice "b" starting
// after the last byte of the identified word. If the length of the "rest" slice
// is 0, the entire byte slice "b" has been processed. The "word" byte slice is
// the sub-slice of the input slice containing the identified word.
//
// Given an empty byte slice "b", the function returns nil values.
//
// [Unicode Standard Annex #29, Word Boundaries]: http://unicode.org/reports/tr29/#Word_Boundaries
func FirstWord(b []byte, state int) (word, rest []byte, newState int) {
	// Nothing to do for an empty slice.
	if len(b) == 0 {
		return
	}

	// Decode the leading rune.
	cp, pos := utf8.DecodeRune(b)
	if pos >= len(b) {
		// The slice holds exactly one rune; it is the whole word.
		return b, nil, wbAny
	}

	// Establish the initial parser state when the caller doesn't know it.
	if state < 0 {
		state, _ = transitionWordBreakState(state, cp, b[pos:], "")
	}

	// Advance rune by rune until the parser reports a boundary.
	for {
		next, size := utf8.DecodeRune(b[pos:])
		var atBoundary bool
		state, atBoundary = transitionWordBreakState(state, next, b[pos+size:], "")
		if atBoundary {
			return b[:pos], b[pos:], state
		}
		pos += size
		if pos >= len(b) {
			// Consumed everything without a boundary: the rest is one word.
			return b, nil, wbAny
		}
	}
}

// FirstWordInString is like [FirstWord] but its input and outputs are strings.
func FirstWordInString(str string, state int) (word, rest string, newState int) {
	// Nothing to do for an empty string.
	if len(str) == 0 {
		return
	}

	// Decode the leading rune.
	cp, pos := utf8.DecodeRuneInString(str)
	if pos >= len(str) {
		// The string holds exactly one rune; it is the whole word.
		return str, "", wbAny
	}

	// Establish the initial parser state when the caller doesn't know it.
	if state < 0 {
		state, _ = transitionWordBreakState(state, cp, nil, str[pos:])
	}

	// Advance rune by rune until the parser reports a boundary.
	for {
		next, size := utf8.DecodeRuneInString(str[pos:])
		var atBoundary bool
		state, atBoundary = transitionWordBreakState(state, next, nil, str[pos+size:])
		if atBoundary {
			return str[:pos], str[pos:], state
		}
		pos += size
		if pos >= len(str) {
			// Consumed everything without a boundary: the rest is one word.
			return str, "", wbAny
		}
	}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/sentence.go
vendor/github.com/rivo/uniseg/sentence.go
package uniseg

import "unicode/utf8"

// FirstSentence returns the first sentence found in the given byte slice
// according to the rules of [Unicode Standard Annex #29, Sentence Boundaries].
// This function can be called continuously to extract all sentences from a byte
// slice, as illustrated in the example below.
//
// If you don't know the current state, for example when calling the function
// for the first time, you must pass -1. For consecutive calls, pass the state
// and rest slice returned by the previous call.
//
// The "rest" slice is the sub-slice of the original byte slice "b" starting
// after the last byte of the identified sentence. If the length of the "rest"
// slice is 0, the entire byte slice "b" has been processed. The "sentence" byte
// slice is the sub-slice of the input slice containing the identified sentence.
//
// Given an empty byte slice "b", the function returns nil values.
//
// [Unicode Standard Annex #29, Sentence Boundaries]: http://unicode.org/reports/tr29/#Sentence_Boundaries
func FirstSentence(b []byte, state int) (sentence, rest []byte, newState int) {
	// Nothing to do for an empty slice.
	if len(b) == 0 {
		return
	}

	// Decode the leading rune.
	cp, pos := utf8.DecodeRune(b)
	if pos >= len(b) {
		// The slice holds exactly one rune; it is the whole sentence.
		return b, nil, sbAny
	}

	// Establish the initial parser state when the caller doesn't know it.
	if state < 0 {
		state, _ = transitionSentenceBreakState(state, cp, b[pos:], "")
	}

	// Advance rune by rune until the parser reports a boundary.
	for {
		next, size := utf8.DecodeRune(b[pos:])
		var atBoundary bool
		state, atBoundary = transitionSentenceBreakState(state, next, b[pos+size:], "")
		if atBoundary {
			return b[:pos], b[pos:], state
		}
		pos += size
		if pos >= len(b) {
			// Consumed everything without a boundary: the rest is one sentence.
			return b, nil, sbAny
		}
	}
}

// FirstSentenceInString is like [FirstSentence] but its input and outputs are
// strings.
func FirstSentenceInString(str string, state int) (sentence, rest string, newState int) {
	// Nothing to do for an empty string.
	if len(str) == 0 {
		return
	}

	// Decode the leading rune.
	cp, pos := utf8.DecodeRuneInString(str)
	if pos >= len(str) {
		// The string holds exactly one rune; it is the whole sentence.
		return str, "", sbAny
	}

	// Establish the initial parser state when the caller doesn't know it.
	if state < 0 {
		state, _ = transitionSentenceBreakState(state, cp, nil, str[pos:])
	}

	// Advance rune by rune until the parser reports a boundary.
	for {
		next, size := utf8.DecodeRuneInString(str[pos:])
		var atBoundary bool
		state, atBoundary = transitionSentenceBreakState(state, next, nil, str[pos+size:])
		if atBoundary {
			return str[:pos], str[pos:], state
		}
		pos += size
		if pos >= len(str) {
			// Consumed everything without a boundary: the rest is one sentence.
			return str, "", sbAny
		}
	}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/graphemerules.go
vendor/github.com/rivo/uniseg/graphemerules.go
package uniseg

// The states of the grapheme cluster parser.
const (
	grAny = iota
	grCR
	grControlLF
	grL
	grLVV
	grLVTT
	grPrepend
	grExtendedPictographic
	grExtendedPictographicZWJ
	grRIOdd
	grRIEven
)

// The grapheme cluster parser's breaking instructions.
const (
	grNoBoundary = iota
	grBoundary
)

// grTransitions implements the grapheme cluster parser's state transitions.
// Maps state and property to a new state, a breaking instruction, and rule
// number. The breaking instruction always refers to the boundary between the
// last and next code point. Returns negative values if no transition is found.
//
// This function is used as follows:
//
//  1. Find specific state + specific property. Stop if found.
//  2. Find specific state + any property.
//  3. Find any state + specific property.
//  4. If only (2) or (3) (but not both) was found, stop.
//  5. If both (2) and (3) were found, use state from (3) and breaking instruction
//     from the transition with the lower rule number, prefer (3) if rule numbers
//     are equal. Stop.
//  6. Assume grAny and grBoundary.
//
// The rule number is the UAX #29 GB rule times ten; 9990 marks transitions
// that exist only to enter a state and carry the lowest priority.
//
// Unicode version 15.0.0.
func grTransitions(state, prop int) (newState int, newProp int, boundary int) {
	// It turns out that using a big switch statement is much faster than using
	// a map.
	switch uint64(state) | uint64(prop)<<32 {
	// GB5: break before controls.
	case grAny | prCR<<32:
		return grCR, grBoundary, 50
	case grAny | prLF<<32:
		return grControlLF, grBoundary, 50
	case grAny | prControl<<32:
		return grControlLF, grBoundary, 50

	// GB4: break after controls.
	case grCR | prAny<<32:
		return grAny, grBoundary, 40
	case grControlLF | prAny<<32:
		return grAny, grBoundary, 40

	// GB3: keep CR LF together.
	case grCR | prLF<<32:
		return grControlLF, grNoBoundary, 30

	// GB6: Hangul L may be followed by L, V, LV, LVT.
	case grAny | prL<<32:
		return grL, grBoundary, 9990
	case grL | prL<<32:
		return grL, grNoBoundary, 60
	case grL | prV<<32:
		return grLVV, grNoBoundary, 60
	case grL | prLV<<32:
		return grLVV, grNoBoundary, 60
	case grL | prLVT<<32:
		return grLVTT, grNoBoundary, 60

	// GB7: Hangul LV/V may be followed by V, T.
	case grAny | prLV<<32:
		return grLVV, grBoundary, 9990
	case grAny | prV<<32:
		return grLVV, grBoundary, 9990
	case grLVV | prV<<32:
		return grLVV, grNoBoundary, 70
	case grLVV | prT<<32:
		return grLVTT, grNoBoundary, 70

	// GB8: Hangul LVT/T may be followed by T.
	case grAny | prLVT<<32:
		return grLVTT, grBoundary, 9990
	case grAny | prT<<32:
		return grLVTT, grBoundary, 9990
	case grLVTT | prT<<32:
		return grLVTT, grNoBoundary, 80

	// GB9: do not break before Extend or ZWJ.
	case grAny | prExtend<<32:
		return grAny, grNoBoundary, 90
	case grAny | prZWJ<<32:
		return grAny, grNoBoundary, 90

	// GB9a: do not break before SpacingMark.
	case grAny | prSpacingMark<<32:
		return grAny, grNoBoundary, 91

	// GB9b: do not break after Prepend.
	case grAny | prPrepend<<32:
		return grPrepend, grBoundary, 9990
	case grPrepend | prAny<<32:
		return grAny, grNoBoundary, 92

	// GB11: do not break within emoji ZWJ sequences.
	case grAny | prExtendedPictographic<<32:
		return grExtendedPictographic, grBoundary, 9990
	case grExtendedPictographic | prExtend<<32:
		return grExtendedPictographic, grNoBoundary, 110
	case grExtendedPictographic | prZWJ<<32:
		return grExtendedPictographicZWJ, grNoBoundary, 110
	case grExtendedPictographicZWJ | prExtendedPictographic<<32:
		return grExtendedPictographic, grNoBoundary, 110

	// GB12 / GB13: do not break within pairs of Regional Indicators.
	case grAny | prRegionalIndicator<<32:
		return grRIOdd, grBoundary, 9990
	case grRIOdd | prRegionalIndicator<<32:
		return grRIEven, grNoBoundary, 120
	case grRIEven | prRegionalIndicator<<32:
		return grRIOdd, grBoundary, 120
	default:
		// No entry for this (state, property) pair.
		return -1, -1, -1
	}
}

// transitionGraphemeState determines the new state of the grapheme cluster
// parser given the current state and the next code point. It also returns the
// code point's grapheme property (the value mapped by the [graphemeCodePoints]
// table) and whether a cluster boundary was detected.
func transitionGraphemeState(state int, r rune) (newState, prop int, boundary bool) {
	// Determine the property of the next character.
	prop = propertyGraphemes(r)

	// Find the applicable transition.
	nextState, nextProp, _ := grTransitions(state, prop)
	if nextState >= 0 {
		// We have a specific transition. We'll use it.
		return nextState, prop, nextProp == grBoundary
	}

	// No specific transition found. Try the less specific ones.
	anyPropState, anyPropProp, anyPropRule := grTransitions(state, prAny)
	anyStateState, anyStateProp, anyStateRule := grTransitions(grAny, prop)
	if anyPropState >= 0 && anyStateState >= 0 {
		// Both apply. We'll use a mix (see comments for grTransitions): the
		// state comes from the property match, the boundary decision from the
		// lower-numbered (higher-priority) rule.
		newState = anyStateState
		boundary = anyStateProp == grBoundary
		if anyPropRule < anyStateRule {
			boundary = anyPropProp == grBoundary
		}
		return
	}

	if anyPropState >= 0 {
		// We only have a specific state.
		return anyPropState, prop, anyPropProp == grBoundary
		// This branch will probably never be reached because a wildcard
		// property match always exists in the current transition map. But we
		// keep it here for future modifications to the transition map where
		// this may not be true anymore.
	}

	if anyStateState >= 0 {
		// We only have a specific property.
		return anyStateState, prop, anyStateProp == grBoundary
	}

	// No known transition. GB999: Any ÷ Any.
	return grAny, prop, true
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/rivo/uniseg/lineproperties.go
vendor/github.com/rivo/uniseg/lineproperties.go
// Code generated via go generate from gen_properties.go. DO NOT EDIT. package uniseg // lineBreakCodePoints are taken from // https://www.unicode.org/Public/15.0.0/ucd/LineBreak.txt // and // https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) // on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var lineBreakCodePoints = [][4]int{ {0x0000, 0x0008, prCM, gcCc}, // [9] <control-0000>..<control-0008> {0x0009, 0x0009, prBA, gcCc}, // <control-0009> {0x000A, 0x000A, prLF, gcCc}, // <control-000A> {0x000B, 0x000C, prBK, gcCc}, // [2] <control-000B>..<control-000C> {0x000D, 0x000D, prCR, gcCc}, // <control-000D> {0x000E, 0x001F, prCM, gcCc}, // [18] <control-000E>..<control-001F> {0x0020, 0x0020, prSP, gcZs}, // SPACE {0x0021, 0x0021, prEX, gcPo}, // EXCLAMATION MARK {0x0022, 0x0022, prQU, gcPo}, // QUOTATION MARK {0x0023, 0x0023, prAL, gcPo}, // NUMBER SIGN {0x0024, 0x0024, prPR, gcSc}, // DOLLAR SIGN {0x0025, 0x0025, prPO, gcPo}, // PERCENT SIGN {0x0026, 0x0026, prAL, gcPo}, // AMPERSAND {0x0027, 0x0027, prQU, gcPo}, // APOSTROPHE {0x0028, 0x0028, prOP, gcPs}, // LEFT PARENTHESIS {0x0029, 0x0029, prCP, gcPe}, // RIGHT PARENTHESIS {0x002A, 0x002A, prAL, gcPo}, // ASTERISK {0x002B, 0x002B, prPR, gcSm}, // PLUS SIGN {0x002C, 0x002C, prIS, gcPo}, // COMMA {0x002D, 0x002D, prHY, gcPd}, // HYPHEN-MINUS {0x002E, 0x002E, prIS, gcPo}, // FULL STOP {0x002F, 0x002F, prSY, gcPo}, // SOLIDUS {0x0030, 0x0039, prNU, gcNd}, // [10] DIGIT ZERO..DIGIT NINE {0x003A, 0x003B, prIS, gcPo}, // [2] COLON..SEMICOLON {0x003C, 0x003E, prAL, gcSm}, // [3] LESS-THAN SIGN..GREATER-THAN SIGN {0x003F, 0x003F, prEX, gcPo}, // QUESTION MARK {0x0040, 0x0040, prAL, gcPo}, // COMMERCIAL AT {0x0041, 0x005A, prAL, gcLu}, // [26] LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z {0x005B, 0x005B, prOP, gcPs}, // LEFT SQUARE BRACKET {0x005C, 0x005C, prPR, gcPo}, // REVERSE SOLIDUS {0x005D, 0x005D, prCP, gcPe}, // RIGHT 
SQUARE BRACKET {0x005E, 0x005E, prAL, gcSk}, // CIRCUMFLEX ACCENT {0x005F, 0x005F, prAL, gcPc}, // LOW LINE {0x0060, 0x0060, prAL, gcSk}, // GRAVE ACCENT {0x0061, 0x007A, prAL, gcLl}, // [26] LATIN SMALL LETTER A..LATIN SMALL LETTER Z {0x007B, 0x007B, prOP, gcPs}, // LEFT CURLY BRACKET {0x007C, 0x007C, prBA, gcSm}, // VERTICAL LINE {0x007D, 0x007D, prCL, gcPe}, // RIGHT CURLY BRACKET {0x007E, 0x007E, prAL, gcSm}, // TILDE {0x007F, 0x007F, prCM, gcCc}, // <control-007F> {0x0080, 0x0084, prCM, gcCc}, // [5] <control-0080>..<control-0084> {0x0085, 0x0085, prNL, gcCc}, // <control-0085> {0x0086, 0x009F, prCM, gcCc}, // [26] <control-0086>..<control-009F> {0x00A0, 0x00A0, prGL, gcZs}, // NO-BREAK SPACE {0x00A1, 0x00A1, prOP, gcPo}, // INVERTED EXCLAMATION MARK {0x00A2, 0x00A2, prPO, gcSc}, // CENT SIGN {0x00A3, 0x00A5, prPR, gcSc}, // [3] POUND SIGN..YEN SIGN {0x00A6, 0x00A6, prAL, gcSo}, // BROKEN BAR {0x00A7, 0x00A7, prAI, gcPo}, // SECTION SIGN {0x00A8, 0x00A8, prAI, gcSk}, // DIAERESIS {0x00A9, 0x00A9, prAL, gcSo}, // COPYRIGHT SIGN {0x00AA, 0x00AA, prAI, gcLo}, // FEMININE ORDINAL INDICATOR {0x00AB, 0x00AB, prQU, gcPi}, // LEFT-POINTING DOUBLE ANGLE QUOTATION MARK {0x00AC, 0x00AC, prAL, gcSm}, // NOT SIGN {0x00AD, 0x00AD, prBA, gcCf}, // SOFT HYPHEN {0x00AE, 0x00AE, prAL, gcSo}, // REGISTERED SIGN {0x00AF, 0x00AF, prAL, gcSk}, // MACRON {0x00B0, 0x00B0, prPO, gcSo}, // DEGREE SIGN {0x00B1, 0x00B1, prPR, gcSm}, // PLUS-MINUS SIGN {0x00B2, 0x00B3, prAI, gcNo}, // [2] SUPERSCRIPT TWO..SUPERSCRIPT THREE {0x00B4, 0x00B4, prBB, gcSk}, // ACUTE ACCENT {0x00B5, 0x00B5, prAL, gcLl}, // MICRO SIGN {0x00B6, 0x00B7, prAI, gcPo}, // [2] PILCROW SIGN..MIDDLE DOT {0x00B8, 0x00B8, prAI, gcSk}, // CEDILLA {0x00B9, 0x00B9, prAI, gcNo}, // SUPERSCRIPT ONE {0x00BA, 0x00BA, prAI, gcLo}, // MASCULINE ORDINAL INDICATOR {0x00BB, 0x00BB, prQU, gcPf}, // RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK {0x00BC, 0x00BE, prAI, gcNo}, // [3] VULGAR FRACTION ONE QUARTER..VULGAR FRACTION THREE 
QUARTERS {0x00BF, 0x00BF, prOP, gcPo}, // INVERTED QUESTION MARK {0x00C0, 0x00D6, prAL, gcLu}, // [23] LATIN CAPITAL LETTER A WITH GRAVE..LATIN CAPITAL LETTER O WITH DIAERESIS {0x00D7, 0x00D7, prAI, gcSm}, // MULTIPLICATION SIGN {0x00D8, 0x00F6, prAL, gcLC}, // [31] LATIN CAPITAL LETTER O WITH STROKE..LATIN SMALL LETTER O WITH DIAERESIS {0x00F7, 0x00F7, prAI, gcSm}, // DIVISION SIGN {0x00F8, 0x00FF, prAL, gcLl}, // [8] LATIN SMALL LETTER O WITH STROKE..LATIN SMALL LETTER Y WITH DIAERESIS {0x0100, 0x017F, prAL, gcLC}, // [128] LATIN CAPITAL LETTER A WITH MACRON..LATIN SMALL LETTER LONG S {0x0180, 0x01BA, prAL, gcLC}, // [59] LATIN SMALL LETTER B WITH STROKE..LATIN SMALL LETTER EZH WITH TAIL {0x01BB, 0x01BB, prAL, gcLo}, // LATIN LETTER TWO WITH STROKE {0x01BC, 0x01BF, prAL, gcLC}, // [4] LATIN CAPITAL LETTER TONE FIVE..LATIN LETTER WYNN {0x01C0, 0x01C3, prAL, gcLo}, // [4] LATIN LETTER DENTAL CLICK..LATIN LETTER RETROFLEX CLICK {0x01C4, 0x024F, prAL, gcLC}, // [140] LATIN CAPITAL LETTER DZ WITH CARON..LATIN SMALL LETTER Y WITH STROKE {0x0250, 0x0293, prAL, gcLl}, // [68] LATIN SMALL LETTER TURNED A..LATIN SMALL LETTER EZH WITH CURL {0x0294, 0x0294, prAL, gcLo}, // LATIN LETTER GLOTTAL STOP {0x0295, 0x02AF, prAL, gcLl}, // [27] LATIN LETTER PHARYNGEAL VOICED FRICATIVE..LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL {0x02B0, 0x02C1, prAL, gcLm}, // [18] MODIFIER LETTER SMALL H..MODIFIER LETTER REVERSED GLOTTAL STOP {0x02C2, 0x02C5, prAL, gcSk}, // [4] MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LETTER DOWN ARROWHEAD {0x02C6, 0x02C6, prAL, gcLm}, // MODIFIER LETTER CIRCUMFLEX ACCENT {0x02C7, 0x02C7, prAI, gcLm}, // CARON {0x02C8, 0x02C8, prBB, gcLm}, // MODIFIER LETTER VERTICAL LINE {0x02C9, 0x02CB, prAI, gcLm}, // [3] MODIFIER LETTER MACRON..MODIFIER LETTER GRAVE ACCENT {0x02CC, 0x02CC, prBB, gcLm}, // MODIFIER LETTER LOW VERTICAL LINE {0x02CD, 0x02CD, prAI, gcLm}, // MODIFIER LETTER LOW MACRON {0x02CE, 0x02CF, prAL, gcLm}, // [2] MODIFIER LETTER LOW GRAVE 
ACCENT..MODIFIER LETTER LOW ACUTE ACCENT {0x02D0, 0x02D0, prAI, gcLm}, // MODIFIER LETTER TRIANGULAR COLON {0x02D1, 0x02D1, prAL, gcLm}, // MODIFIER LETTER HALF TRIANGULAR COLON {0x02D2, 0x02D7, prAL, gcSk}, // [6] MODIFIER LETTER CENTRED RIGHT HALF RING..MODIFIER LETTER MINUS SIGN {0x02D8, 0x02DB, prAI, gcSk}, // [4] BREVE..OGONEK {0x02DC, 0x02DC, prAL, gcSk}, // SMALL TILDE {0x02DD, 0x02DD, prAI, gcSk}, // DOUBLE ACUTE ACCENT {0x02DE, 0x02DE, prAL, gcSk}, // MODIFIER LETTER RHOTIC HOOK {0x02DF, 0x02DF, prBB, gcSk}, // MODIFIER LETTER CROSS ACCENT {0x02E0, 0x02E4, prAL, gcLm}, // [5] MODIFIER LETTER SMALL GAMMA..MODIFIER LETTER SMALL REVERSED GLOTTAL STOP {0x02E5, 0x02EB, prAL, gcSk}, // [7] MODIFIER LETTER EXTRA-HIGH TONE BAR..MODIFIER LETTER YANG DEPARTING TONE MARK {0x02EC, 0x02EC, prAL, gcLm}, // MODIFIER LETTER VOICING {0x02ED, 0x02ED, prAL, gcSk}, // MODIFIER LETTER UNASPIRATED {0x02EE, 0x02EE, prAL, gcLm}, // MODIFIER LETTER DOUBLE APOSTROPHE {0x02EF, 0x02FF, prAL, gcSk}, // [17] MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER LETTER LOW LEFT ARROW {0x0300, 0x034E, prCM, gcMn}, // [79] COMBINING GRAVE ACCENT..COMBINING UPWARDS ARROW BELOW {0x034F, 0x034F, prGL, gcMn}, // COMBINING GRAPHEME JOINER {0x0350, 0x035B, prCM, gcMn}, // [12] COMBINING RIGHT ARROWHEAD ABOVE..COMBINING ZIGZAG ABOVE {0x035C, 0x0362, prGL, gcMn}, // [7] COMBINING DOUBLE BREVE BELOW..COMBINING DOUBLE RIGHTWARDS ARROW BELOW {0x0363, 0x036F, prCM, gcMn}, // [13] COMBINING LATIN SMALL LETTER A..COMBINING LATIN SMALL LETTER X {0x0370, 0x0373, prAL, gcLC}, // [4] GREEK CAPITAL LETTER HETA..GREEK SMALL LETTER ARCHAIC SAMPI {0x0374, 0x0374, prAL, gcLm}, // GREEK NUMERAL SIGN {0x0375, 0x0375, prAL, gcSk}, // GREEK LOWER NUMERAL SIGN {0x0376, 0x0377, prAL, gcLC}, // [2] GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA..GREEK SMALL LETTER PAMPHYLIAN DIGAMMA {0x037A, 0x037A, prAL, gcLm}, // GREEK YPOGEGRAMMENI {0x037B, 0x037D, prAL, gcLl}, // [3] GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GREEK SMALL REVERSED 
DOTTED LUNATE SIGMA SYMBOL {0x037E, 0x037E, prIS, gcPo}, // GREEK QUESTION MARK {0x037F, 0x037F, prAL, gcLu}, // GREEK CAPITAL LETTER YOT {0x0384, 0x0385, prAL, gcSk}, // [2] GREEK TONOS..GREEK DIALYTIKA TONOS {0x0386, 0x0386, prAL, gcLu}, // GREEK CAPITAL LETTER ALPHA WITH TONOS {0x0387, 0x0387, prAL, gcPo}, // GREEK ANO TELEIA {0x0388, 0x038A, prAL, gcLu}, // [3] GREEK CAPITAL LETTER EPSILON WITH TONOS..GREEK CAPITAL LETTER IOTA WITH TONOS {0x038C, 0x038C, prAL, gcLu}, // GREEK CAPITAL LETTER OMICRON WITH TONOS {0x038E, 0x03A1, prAL, gcLC}, // [20] GREEK CAPITAL LETTER UPSILON WITH TONOS..GREEK CAPITAL LETTER RHO {0x03A3, 0x03F5, prAL, gcLC}, // [83] GREEK CAPITAL LETTER SIGMA..GREEK LUNATE EPSILON SYMBOL {0x03F6, 0x03F6, prAL, gcSm}, // GREEK REVERSED LUNATE EPSILON SYMBOL {0x03F7, 0x03FF, prAL, gcLC}, // [9] GREEK CAPITAL LETTER SHO..GREEK CAPITAL REVERSED DOTTED LUNATE SIGMA SYMBOL {0x0400, 0x0481, prAL, gcLC}, // [130] CYRILLIC CAPITAL LETTER IE WITH GRAVE..CYRILLIC SMALL LETTER KOPPA {0x0482, 0x0482, prAL, gcSo}, // CYRILLIC THOUSANDS SIGN {0x0483, 0x0487, prCM, gcMn}, // [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE {0x0488, 0x0489, prCM, gcMe}, // [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN {0x048A, 0x04FF, prAL, gcLC}, // [118] CYRILLIC CAPITAL LETTER SHORT I WITH TAIL..CYRILLIC SMALL LETTER HA WITH STROKE {0x0500, 0x052F, prAL, gcLC}, // [48] CYRILLIC CAPITAL LETTER KOMI DE..CYRILLIC SMALL LETTER EL WITH DESCENDER {0x0531, 0x0556, prAL, gcLu}, // [38] ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITAL LETTER FEH {0x0559, 0x0559, prAL, gcLm}, // ARMENIAN MODIFIER LETTER LEFT HALF RING {0x055A, 0x055F, prAL, gcPo}, // [6] ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION MARK {0x0560, 0x0588, prAL, gcLl}, // [41] ARMENIAN SMALL LETTER TURNED AYB..ARMENIAN SMALL LETTER YI WITH STROKE {0x0589, 0x0589, prIS, gcPo}, // ARMENIAN FULL STOP {0x058A, 0x058A, prBA, gcPd}, // ARMENIAN HYPHEN {0x058D, 0x058E, prAL, gcSo}, // [2] 
RIGHT-FACING ARMENIAN ETERNITY SIGN..LEFT-FACING ARMENIAN ETERNITY SIGN {0x058F, 0x058F, prPR, gcSc}, // ARMENIAN DRAM SIGN {0x0591, 0x05BD, prCM, gcMn}, // [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG {0x05BE, 0x05BE, prBA, gcPd}, // HEBREW PUNCTUATION MAQAF {0x05BF, 0x05BF, prCM, gcMn}, // HEBREW POINT RAFE {0x05C0, 0x05C0, prAL, gcPo}, // HEBREW PUNCTUATION PASEQ {0x05C1, 0x05C2, prCM, gcMn}, // [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT {0x05C3, 0x05C3, prAL, gcPo}, // HEBREW PUNCTUATION SOF PASUQ {0x05C4, 0x05C5, prCM, gcMn}, // [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT {0x05C6, 0x05C6, prEX, gcPo}, // HEBREW PUNCTUATION NUN HAFUKHA {0x05C7, 0x05C7, prCM, gcMn}, // HEBREW POINT QAMATS QATAN {0x05D0, 0x05EA, prHL, gcLo}, // [27] HEBREW LETTER ALEF..HEBREW LETTER TAV {0x05EF, 0x05F2, prHL, gcLo}, // [4] HEBREW YOD TRIANGLE..HEBREW LIGATURE YIDDISH DOUBLE YOD {0x05F3, 0x05F4, prAL, gcPo}, // [2] HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATION GERSHAYIM {0x0600, 0x0605, prAL, gcCf}, // [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE {0x0606, 0x0608, prAL, gcSm}, // [3] ARABIC-INDIC CUBE ROOT..ARABIC RAY {0x0609, 0x060A, prPO, gcPo}, // [2] ARABIC-INDIC PER MILLE SIGN..ARABIC-INDIC PER TEN THOUSAND SIGN {0x060B, 0x060B, prPO, gcSc}, // AFGHANI SIGN {0x060C, 0x060D, prIS, gcPo}, // [2] ARABIC COMMA..ARABIC DATE SEPARATOR {0x060E, 0x060F, prAL, gcSo}, // [2] ARABIC POETIC VERSE SIGN..ARABIC SIGN MISRA {0x0610, 0x061A, prCM, gcMn}, // [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA {0x061B, 0x061B, prEX, gcPo}, // ARABIC SEMICOLON {0x061C, 0x061C, prCM, gcCf}, // ARABIC LETTER MARK {0x061D, 0x061F, prEX, gcPo}, // [3] ARABIC END OF TEXT MARK..ARABIC QUESTION MARK {0x0620, 0x063F, prAL, gcLo}, // [32] ARABIC LETTER KASHMIRI YEH..ARABIC LETTER FARSI YEH WITH THREE DOTS ABOVE {0x0640, 0x0640, prAL, gcLm}, // ARABIC TATWEEL {0x0641, 0x064A, prAL, gcLo}, // [10] ARABIC LETTER FEH..ARABIC LETTER YEH {0x064B, 0x065F, prCM, gcMn}, // [21] ARABIC 
FATHATAN..ARABIC WAVY HAMZA BELOW {0x0660, 0x0669, prNU, gcNd}, // [10] ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT NINE {0x066A, 0x066A, prPO, gcPo}, // ARABIC PERCENT SIGN {0x066B, 0x066C, prNU, gcPo}, // [2] ARABIC DECIMAL SEPARATOR..ARABIC THOUSANDS SEPARATOR {0x066D, 0x066D, prAL, gcPo}, // ARABIC FIVE POINTED STAR {0x066E, 0x066F, prAL, gcLo}, // [2] ARABIC LETTER DOTLESS BEH..ARABIC LETTER DOTLESS QAF {0x0670, 0x0670, prCM, gcMn}, // ARABIC LETTER SUPERSCRIPT ALEF {0x0671, 0x06D3, prAL, gcLo}, // [99] ARABIC LETTER ALEF WASLA..ARABIC LETTER YEH BARREE WITH HAMZA ABOVE {0x06D4, 0x06D4, prEX, gcPo}, // ARABIC FULL STOP {0x06D5, 0x06D5, prAL, gcLo}, // ARABIC LETTER AE {0x06D6, 0x06DC, prCM, gcMn}, // [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN {0x06DD, 0x06DD, prAL, gcCf}, // ARABIC END OF AYAH {0x06DE, 0x06DE, prAL, gcSo}, // ARABIC START OF RUB EL HIZB {0x06DF, 0x06E4, prCM, gcMn}, // [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA {0x06E5, 0x06E6, prAL, gcLm}, // [2] ARABIC SMALL WAW..ARABIC SMALL YEH {0x06E7, 0x06E8, prCM, gcMn}, // [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON {0x06E9, 0x06E9, prAL, gcSo}, // ARABIC PLACE OF SAJDAH {0x06EA, 0x06ED, prCM, gcMn}, // [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM {0x06EE, 0x06EF, prAL, gcLo}, // [2] ARABIC LETTER DAL WITH INVERTED V..ARABIC LETTER REH WITH INVERTED V {0x06F0, 0x06F9, prNU, gcNd}, // [10] EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED ARABIC-INDIC DIGIT NINE {0x06FA, 0x06FC, prAL, gcLo}, // [3] ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC LETTER GHAIN WITH DOT BELOW {0x06FD, 0x06FE, prAL, gcSo}, // [2] ARABIC SIGN SINDHI AMPERSAND..ARABIC SIGN SINDHI POSTPOSITION MEN {0x06FF, 0x06FF, prAL, gcLo}, // ARABIC LETTER HEH WITH INVERTED V {0x0700, 0x070D, prAL, gcPo}, // [14] SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN ASTERISCUS {0x070F, 0x070F, prAL, gcCf}, // SYRIAC ABBREVIATION MARK {0x0710, 0x0710, prAL, gcLo}, // SYRIAC LETTER ALAPH 
{0x0711, 0x0711, prCM, gcMn}, // SYRIAC LETTER SUPERSCRIPT ALAPH {0x0712, 0x072F, prAL, gcLo}, // [30] SYRIAC LETTER BETH..SYRIAC LETTER PERSIAN DHALATH {0x0730, 0x074A, prCM, gcMn}, // [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH {0x074D, 0x074F, prAL, gcLo}, // [3] SYRIAC LETTER SOGDIAN ZHAIN..SYRIAC LETTER SOGDIAN FE {0x0750, 0x077F, prAL, gcLo}, // [48] ARABIC LETTER BEH WITH THREE DOTS HORIZONTALLY BELOW..ARABIC LETTER KAF WITH TWO DOTS ABOVE {0x0780, 0x07A5, prAL, gcLo}, // [38] THAANA LETTER HAA..THAANA LETTER WAAVU {0x07A6, 0x07B0, prCM, gcMn}, // [11] THAANA ABAFILI..THAANA SUKUN {0x07B1, 0x07B1, prAL, gcLo}, // THAANA LETTER NAA {0x07C0, 0x07C9, prNU, gcNd}, // [10] NKO DIGIT ZERO..NKO DIGIT NINE {0x07CA, 0x07EA, prAL, gcLo}, // [33] NKO LETTER A..NKO LETTER JONA RA {0x07EB, 0x07F3, prCM, gcMn}, // [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE {0x07F4, 0x07F5, prAL, gcLm}, // [2] NKO HIGH TONE APOSTROPHE..NKO LOW TONE APOSTROPHE {0x07F6, 0x07F6, prAL, gcSo}, // NKO SYMBOL OO DENNEN {0x07F7, 0x07F7, prAL, gcPo}, // NKO SYMBOL GBAKURUNEN {0x07F8, 0x07F8, prIS, gcPo}, // NKO COMMA {0x07F9, 0x07F9, prEX, gcPo}, // NKO EXCLAMATION MARK {0x07FA, 0x07FA, prAL, gcLm}, // NKO LAJANYALAN {0x07FD, 0x07FD, prCM, gcMn}, // NKO DANTAYALAN {0x07FE, 0x07FF, prPR, gcSc}, // [2] NKO DOROME SIGN..NKO TAMAN SIGN {0x0800, 0x0815, prAL, gcLo}, // [22] SAMARITAN LETTER ALAF..SAMARITAN LETTER TAAF {0x0816, 0x0819, prCM, gcMn}, // [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH {0x081A, 0x081A, prAL, gcLm}, // SAMARITAN MODIFIER LETTER EPENTHETIC YUT {0x081B, 0x0823, prCM, gcMn}, // [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A {0x0824, 0x0824, prAL, gcLm}, // SAMARITAN MODIFIER LETTER SHORT A {0x0825, 0x0827, prCM, gcMn}, // [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U {0x0828, 0x0828, prAL, gcLm}, // SAMARITAN MODIFIER LETTER I {0x0829, 0x082D, prCM, gcMn}, // [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA {0x0830, 0x083E, prAL, 
gcPo}, // [15] SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUNCTUATION ANNAAU {0x0840, 0x0858, prAL, gcLo}, // [25] MANDAIC LETTER HALQA..MANDAIC LETTER AIN {0x0859, 0x085B, prCM, gcMn}, // [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK {0x085E, 0x085E, prAL, gcPo}, // MANDAIC PUNCTUATION {0x0860, 0x086A, prAL, gcLo}, // [11] SYRIAC LETTER MALAYALAM NGA..SYRIAC LETTER MALAYALAM SSA {0x0870, 0x0887, prAL, gcLo}, // [24] ARABIC LETTER ALEF WITH ATTACHED FATHA..ARABIC BASELINE ROUND DOT {0x0888, 0x0888, prAL, gcSk}, // ARABIC RAISED ROUND DOT {0x0889, 0x088E, prAL, gcLo}, // [6] ARABIC LETTER NOON WITH INVERTED SMALL V..ARABIC VERTICAL TAIL {0x0890, 0x0891, prAL, gcCf}, // [2] ARABIC POUND MARK ABOVE..ARABIC PIASTRE MARK ABOVE {0x0898, 0x089F, prCM, gcMn}, // [8] ARABIC SMALL HIGH WORD AL-JUZ..ARABIC HALF MADDA OVER MADDA {0x08A0, 0x08C8, prAL, gcLo}, // [41] ARABIC LETTER BEH WITH SMALL V BELOW..ARABIC LETTER GRAF {0x08C9, 0x08C9, prAL, gcLm}, // ARABIC SMALL FARSI YEH {0x08CA, 0x08E1, prCM, gcMn}, // [24] ARABIC SMALL HIGH FARSI YEH..ARABIC SMALL HIGH SIGN SAFHA {0x08E2, 0x08E2, prAL, gcCf}, // ARABIC DISPUTED END OF AYAH {0x08E3, 0x08FF, prCM, gcMn}, // [29] ARABIC TURNED DAMMA BELOW..ARABIC MARK SIDEWAYS NOON GHUNNA {0x0900, 0x0902, prCM, gcMn}, // [3] DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANAGARI SIGN ANUSVARA {0x0903, 0x0903, prCM, gcMc}, // DEVANAGARI SIGN VISARGA {0x0904, 0x0939, prAL, gcLo}, // [54] DEVANAGARI LETTER SHORT A..DEVANAGARI LETTER HA {0x093A, 0x093A, prCM, gcMn}, // DEVANAGARI VOWEL SIGN OE {0x093B, 0x093B, prCM, gcMc}, // DEVANAGARI VOWEL SIGN OOE {0x093C, 0x093C, prCM, gcMn}, // DEVANAGARI SIGN NUKTA {0x093D, 0x093D, prAL, gcLo}, // DEVANAGARI SIGN AVAGRAHA {0x093E, 0x0940, prCM, gcMc}, // [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II {0x0941, 0x0948, prCM, gcMn}, // [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI {0x0949, 0x094C, prCM, gcMc}, // [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU {0x094D, 0x094D, 
prCM, gcMn}, // DEVANAGARI SIGN VIRAMA {0x094E, 0x094F, prCM, gcMc}, // [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW {0x0950, 0x0950, prAL, gcLo}, // DEVANAGARI OM {0x0951, 0x0957, prCM, gcMn}, // [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE {0x0958, 0x0961, prAL, gcLo}, // [10] DEVANAGARI LETTER QA..DEVANAGARI LETTER VOCALIC LL {0x0962, 0x0963, prCM, gcMn}, // [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL {0x0964, 0x0965, prBA, gcPo}, // [2] DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA {0x0966, 0x096F, prNU, gcNd}, // [10] DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE {0x0970, 0x0970, prAL, gcPo}, // DEVANAGARI ABBREVIATION SIGN {0x0971, 0x0971, prAL, gcLm}, // DEVANAGARI SIGN HIGH SPACING DOT {0x0972, 0x097F, prAL, gcLo}, // [14] DEVANAGARI LETTER CANDRA A..DEVANAGARI LETTER BBA {0x0980, 0x0980, prAL, gcLo}, // BENGALI ANJI {0x0981, 0x0981, prCM, gcMn}, // BENGALI SIGN CANDRABINDU {0x0982, 0x0983, prCM, gcMc}, // [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA {0x0985, 0x098C, prAL, gcLo}, // [8] BENGALI LETTER A..BENGALI LETTER VOCALIC L {0x098F, 0x0990, prAL, gcLo}, // [2] BENGALI LETTER E..BENGALI LETTER AI {0x0993, 0x09A8, prAL, gcLo}, // [22] BENGALI LETTER O..BENGALI LETTER NA {0x09AA, 0x09B0, prAL, gcLo}, // [7] BENGALI LETTER PA..BENGALI LETTER RA {0x09B2, 0x09B2, prAL, gcLo}, // BENGALI LETTER LA {0x09B6, 0x09B9, prAL, gcLo}, // [4] BENGALI LETTER SHA..BENGALI LETTER HA {0x09BC, 0x09BC, prCM, gcMn}, // BENGALI SIGN NUKTA {0x09BD, 0x09BD, prAL, gcLo}, // BENGALI SIGN AVAGRAHA {0x09BE, 0x09C0, prCM, gcMc}, // [3] BENGALI VOWEL SIGN AA..BENGALI VOWEL SIGN II {0x09C1, 0x09C4, prCM, gcMn}, // [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR {0x09C7, 0x09C8, prCM, gcMc}, // [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI {0x09CB, 0x09CC, prCM, gcMc}, // [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU {0x09CD, 0x09CD, prCM, gcMn}, // BENGALI SIGN VIRAMA {0x09CE, 0x09CE, prAL, gcLo}, // BENGALI 
LETTER KHANDA TA {0x09D7, 0x09D7, prCM, gcMc}, // BENGALI AU LENGTH MARK {0x09DC, 0x09DD, prAL, gcLo}, // [2] BENGALI LETTER RRA..BENGALI LETTER RHA {0x09DF, 0x09E1, prAL, gcLo}, // [3] BENGALI LETTER YYA..BENGALI LETTER VOCALIC LL {0x09E2, 0x09E3, prCM, gcMn}, // [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL {0x09E6, 0x09EF, prNU, gcNd}, // [10] BENGALI DIGIT ZERO..BENGALI DIGIT NINE {0x09F0, 0x09F1, prAL, gcLo}, // [2] BENGALI LETTER RA WITH MIDDLE DIAGONAL..BENGALI LETTER RA WITH LOWER DIAGONAL {0x09F2, 0x09F3, prPO, gcSc}, // [2] BENGALI RUPEE MARK..BENGALI RUPEE SIGN {0x09F4, 0x09F8, prAL, gcNo}, // [5] BENGALI CURRENCY NUMERATOR ONE..BENGALI CURRENCY NUMERATOR ONE LESS THAN THE DENOMINATOR {0x09F9, 0x09F9, prPO, gcNo}, // BENGALI CURRENCY DENOMINATOR SIXTEEN {0x09FA, 0x09FA, prAL, gcSo}, // BENGALI ISSHAR {0x09FB, 0x09FB, prPR, gcSc}, // BENGALI GANDA MARK {0x09FC, 0x09FC, prAL, gcLo}, // BENGALI LETTER VEDIC ANUSVARA {0x09FD, 0x09FD, prAL, gcPo}, // BENGALI ABBREVIATION SIGN {0x09FE, 0x09FE, prCM, gcMn}, // BENGALI SANDHI MARK {0x0A01, 0x0A02, prCM, gcMn}, // [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI {0x0A03, 0x0A03, prCM, gcMc}, // GURMUKHI SIGN VISARGA {0x0A05, 0x0A0A, prAL, gcLo}, // [6] GURMUKHI LETTER A..GURMUKHI LETTER UU {0x0A0F, 0x0A10, prAL, gcLo}, // [2] GURMUKHI LETTER EE..GURMUKHI LETTER AI {0x0A13, 0x0A28, prAL, gcLo}, // [22] GURMUKHI LETTER OO..GURMUKHI LETTER NA {0x0A2A, 0x0A30, prAL, gcLo}, // [7] GURMUKHI LETTER PA..GURMUKHI LETTER RA {0x0A32, 0x0A33, prAL, gcLo}, // [2] GURMUKHI LETTER LA..GURMUKHI LETTER LLA {0x0A35, 0x0A36, prAL, gcLo}, // [2] GURMUKHI LETTER VA..GURMUKHI LETTER SHA {0x0A38, 0x0A39, prAL, gcLo}, // [2] GURMUKHI LETTER SA..GURMUKHI LETTER HA {0x0A3C, 0x0A3C, prCM, gcMn}, // GURMUKHI SIGN NUKTA {0x0A3E, 0x0A40, prCM, gcMc}, // [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II {0x0A41, 0x0A42, prCM, gcMn}, // [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU {0x0A47, 0x0A48, prCM, gcMn}, // [2] 
GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI {0x0A4B, 0x0A4D, prCM, gcMn}, // [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA {0x0A51, 0x0A51, prCM, gcMn}, // GURMUKHI SIGN UDAAT {0x0A59, 0x0A5C, prAL, gcLo}, // [4] GURMUKHI LETTER KHHA..GURMUKHI LETTER RRA {0x0A5E, 0x0A5E, prAL, gcLo}, // GURMUKHI LETTER FA {0x0A66, 0x0A6F, prNU, gcNd}, // [10] GURMUKHI DIGIT ZERO..GURMUKHI DIGIT NINE {0x0A70, 0x0A71, prCM, gcMn}, // [2] GURMUKHI TIPPI..GURMUKHI ADDAK {0x0A72, 0x0A74, prAL, gcLo}, // [3] GURMUKHI IRI..GURMUKHI EK ONKAR {0x0A75, 0x0A75, prCM, gcMn}, // GURMUKHI SIGN YAKASH {0x0A76, 0x0A76, prAL, gcPo}, // GURMUKHI ABBREVIATION SIGN {0x0A81, 0x0A82, prCM, gcMn}, // [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA {0x0A83, 0x0A83, prCM, gcMc}, // GUJARATI SIGN VISARGA {0x0A85, 0x0A8D, prAL, gcLo}, // [9] GUJARATI LETTER A..GUJARATI VOWEL CANDRA E {0x0A8F, 0x0A91, prAL, gcLo}, // [3] GUJARATI LETTER E..GUJARATI VOWEL CANDRA O {0x0A93, 0x0AA8, prAL, gcLo}, // [22] GUJARATI LETTER O..GUJARATI LETTER NA {0x0AAA, 0x0AB0, prAL, gcLo}, // [7] GUJARATI LETTER PA..GUJARATI LETTER RA {0x0AB2, 0x0AB3, prAL, gcLo}, // [2] GUJARATI LETTER LA..GUJARATI LETTER LLA {0x0AB5, 0x0AB9, prAL, gcLo}, // [5] GUJARATI LETTER VA..GUJARATI LETTER HA {0x0ABC, 0x0ABC, prCM, gcMn}, // GUJARATI SIGN NUKTA {0x0ABD, 0x0ABD, prAL, gcLo}, // GUJARATI SIGN AVAGRAHA {0x0ABE, 0x0AC0, prCM, gcMc}, // [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II {0x0AC1, 0x0AC5, prCM, gcMn}, // [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E {0x0AC7, 0x0AC8, prCM, gcMn}, // [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI {0x0AC9, 0x0AC9, prCM, gcMc}, // GUJARATI VOWEL SIGN CANDRA O {0x0ACB, 0x0ACC, prCM, gcMc}, // [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU {0x0ACD, 0x0ACD, prCM, gcMn}, // GUJARATI SIGN VIRAMA {0x0AD0, 0x0AD0, prAL, gcLo}, // GUJARATI OM {0x0AE0, 0x0AE1, prAL, gcLo}, // [2] GUJARATI LETTER VOCALIC RR..GUJARATI LETTER VOCALIC LL {0x0AE2, 0x0AE3, prCM, gcMn}, // [2] GUJARATI 
VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL {0x0AE6, 0x0AEF, prNU, gcNd}, // [10] GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE {0x0AF0, 0x0AF0, prAL, gcPo}, // GUJARATI ABBREVIATION SIGN {0x0AF1, 0x0AF1, prPR, gcSc}, // GUJARATI RUPEE SIGN {0x0AF9, 0x0AF9, prAL, gcLo}, // GUJARATI LETTER ZHA {0x0AFA, 0x0AFF, prCM, gcMn}, // [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE {0x0B01, 0x0B01, prCM, gcMn}, // ORIYA SIGN CANDRABINDU {0x0B02, 0x0B03, prCM, gcMc}, // [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA {0x0B05, 0x0B0C, prAL, gcLo}, // [8] ORIYA LETTER A..ORIYA LETTER VOCALIC L {0x0B0F, 0x0B10, prAL, gcLo}, // [2] ORIYA LETTER E..ORIYA LETTER AI {0x0B13, 0x0B28, prAL, gcLo}, // [22] ORIYA LETTER O..ORIYA LETTER NA {0x0B2A, 0x0B30, prAL, gcLo}, // [7] ORIYA LETTER PA..ORIYA LETTER RA {0x0B32, 0x0B33, prAL, gcLo}, // [2] ORIYA LETTER LA..ORIYA LETTER LLA {0x0B35, 0x0B39, prAL, gcLo}, // [5] ORIYA LETTER VA..ORIYA LETTER HA {0x0B3C, 0x0B3C, prCM, gcMn}, // ORIYA SIGN NUKTA {0x0B3D, 0x0B3D, prAL, gcLo}, // ORIYA SIGN AVAGRAHA {0x0B3E, 0x0B3E, prCM, gcMc}, // ORIYA VOWEL SIGN AA {0x0B3F, 0x0B3F, prCM, gcMn}, // ORIYA VOWEL SIGN I {0x0B40, 0x0B40, prCM, gcMc}, // ORIYA VOWEL SIGN II {0x0B41, 0x0B44, prCM, gcMn}, // [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR {0x0B47, 0x0B48, prCM, gcMc}, // [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI {0x0B4B, 0x0B4C, prCM, gcMc}, // [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU {0x0B4D, 0x0B4D, prCM, gcMn}, // ORIYA SIGN VIRAMA {0x0B55, 0x0B56, prCM, gcMn}, // [2] ORIYA SIGN OVERLINE..ORIYA AI LENGTH MARK {0x0B57, 0x0B57, prCM, gcMc}, // ORIYA AU LENGTH MARK {0x0B5C, 0x0B5D, prAL, gcLo}, // [2] ORIYA LETTER RRA..ORIYA LETTER RHA {0x0B5F, 0x0B61, prAL, gcLo}, // [3] ORIYA LETTER YYA..ORIYA LETTER VOCALIC LL {0x0B62, 0x0B63, prCM, gcMn}, // [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL {0x0B66, 0x0B6F, prNU, gcNd}, // [10] ORIYA DIGIT ZERO..ORIYA DIGIT NINE {0x0B70, 0x0B70, prAL, gcSo}, // ORIYA ISSHAR 
{0x0B71, 0x0B71, prAL, gcLo}, // ORIYA LETTER WA {0x0B72, 0x0B77, prAL, gcNo}, // [6] ORIYA FRACTION ONE QUARTER..ORIYA FRACTION THREE SIXTEENTHS {0x0B82, 0x0B82, prCM, gcMn}, // TAMIL SIGN ANUSVARA {0x0B83, 0x0B83, prAL, gcLo}, // TAMIL SIGN VISARGA {0x0B85, 0x0B8A, prAL, gcLo}, // [6] TAMIL LETTER A..TAMIL LETTER UU {0x0B8E, 0x0B90, prAL, gcLo}, // [3] TAMIL LETTER E..TAMIL LETTER AI {0x0B92, 0x0B95, prAL, gcLo}, // [4] TAMIL LETTER O..TAMIL LETTER KA {0x0B99, 0x0B9A, prAL, gcLo}, // [2] TAMIL LETTER NGA..TAMIL LETTER CA {0x0B9C, 0x0B9C, prAL, gcLo}, // TAMIL LETTER JA {0x0B9E, 0x0B9F, prAL, gcLo}, // [2] TAMIL LETTER NYA..TAMIL LETTER TTA {0x0BA3, 0x0BA4, prAL, gcLo}, // [2] TAMIL LETTER NNA..TAMIL LETTER TA {0x0BA8, 0x0BAA, prAL, gcLo}, // [3] TAMIL LETTER NA..TAMIL LETTER PA {0x0BAE, 0x0BB9, prAL, gcLo}, // [12] TAMIL LETTER MA..TAMIL LETTER HA {0x0BBE, 0x0BBF, prCM, gcMc}, // [2] TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN I {0x0BC0, 0x0BC0, prCM, gcMn}, // TAMIL VOWEL SIGN II {0x0BC1, 0x0BC2, prCM, gcMc}, // [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU {0x0BC6, 0x0BC8, prCM, gcMc}, // [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI {0x0BCA, 0x0BCC, prCM, gcMc}, // [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU {0x0BCD, 0x0BCD, prCM, gcMn}, // TAMIL SIGN VIRAMA {0x0BD0, 0x0BD0, prAL, gcLo}, // TAMIL OM {0x0BD7, 0x0BD7, prCM, gcMc}, // TAMIL AU LENGTH MARK {0x0BE6, 0x0BEF, prNU, gcNd}, // [10] TAMIL DIGIT ZERO..TAMIL DIGIT NINE {0x0BF0, 0x0BF2, prAL, gcNo}, // [3] TAMIL NUMBER TEN..TAMIL NUMBER ONE THOUSAND {0x0BF3, 0x0BF8, prAL, gcSo}, // [6] TAMIL DAY SIGN..TAMIL AS ABOVE SIGN {0x0BF9, 0x0BF9, prPR, gcSc}, // TAMIL RUPEE SIGN {0x0BFA, 0x0BFA, prAL, gcSo}, // TAMIL NUMBER SIGN {0x0C00, 0x0C00, prCM, gcMn}, // TELUGU SIGN COMBINING CANDRABINDU ABOVE {0x0C01, 0x0C03, prCM, gcMc}, // [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA {0x0C04, 0x0C04, prCM, gcMn}, // TELUGU SIGN COMBINING ANUSVARA ABOVE {0x0C05, 0x0C0C, prAL, gcLo}, // [8] TELUGU LETTER A..TELUGU LETTER VOCALIC 
L {0x0C0E, 0x0C10, prAL, gcLo}, // [3] TELUGU LETTER E..TELUGU LETTER AI {0x0C12, 0x0C28, prAL, gcLo}, // [23] TELUGU LETTER O..TELUGU LETTER NA {0x0C2A, 0x0C39, prAL, gcLo}, // [16] TELUGU LETTER PA..TELUGU LETTER HA {0x0C3C, 0x0C3C, prCM, gcMn}, // TELUGU SIGN NUKTA {0x0C3D, 0x0C3D, prAL, gcLo}, // TELUGU SIGN AVAGRAHA {0x0C3E, 0x0C40, prCM, gcMn}, // [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II {0x0C41, 0x0C44, prCM, gcMc}, // [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR {0x0C46, 0x0C48, prCM, gcMn}, // [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
true