repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/proto/extensions.go
vendor/github.com/golang/protobuf/proto/extensions.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package proto import ( "errors" "fmt" "reflect" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" "google.golang.org/protobuf/runtime/protoimpl" ) type ( // ExtensionDesc represents an extension descriptor and // is used to interact with an extension field in a message. // // Variables of this type are generated in code by protoc-gen-go. ExtensionDesc = protoimpl.ExtensionInfo // ExtensionRange represents a range of message extensions. // Used in code generated by protoc-gen-go. ExtensionRange = protoiface.ExtensionRangeV1 // Deprecated: Do not use; this is an internal type. Extension = protoimpl.ExtensionFieldV1 // Deprecated: Do not use; this is an internal type. XXX_InternalExtensions = protoimpl.ExtensionFields ) // ErrMissingExtension reports whether the extension was not present. var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") // HasExtension reports whether the extension field is present in m // either as an explicitly populated field or as an unknown field. func HasExtension(m Message, xt *ExtensionDesc) (has bool) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() { return false } // Check whether any populated known field matches the field number. xtd := xt.TypeDescriptor() if isValidExtension(mr.Descriptor(), xtd) { has = mr.Has(xtd) } else { mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { has = int32(fd.Number()) == xt.Field return !has }) } // Check whether any unknown field matches the field number. 
for b := mr.GetUnknown(); !has && len(b) > 0; { num, _, n := protowire.ConsumeField(b) has = int32(num) == xt.Field b = b[n:] } return has } // ClearExtension removes the extension field from m // either as an explicitly populated field or as an unknown field. func ClearExtension(m Message, xt *ExtensionDesc) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() { return } xtd := xt.TypeDescriptor() if isValidExtension(mr.Descriptor(), xtd) { mr.Clear(xtd) } else { mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { if int32(fd.Number()) == xt.Field { mr.Clear(fd) return false } return true }) } clearUnknown(mr, fieldNum(xt.Field)) } // ClearAllExtensions clears all extensions from m. // This includes populated fields and unknown fields in the extension range. func ClearAllExtensions(m Message) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() { return } mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { if fd.IsExtension() { mr.Clear(fd) } return true }) clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } // GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // // If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), // then GetExtension returns the raw encoded bytes for the extension field. func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { return nil, errNotExtendable } // Retrieve the unknown fields for this extension field. 
var bo protoreflect.RawFields for bi := mr.GetUnknown(); len(bi) > 0; { num, _, n := protowire.ConsumeField(bi) if int32(num) == xt.Field { bo = append(bo, bi[:n]...) } bi = bi[n:] } // For type incomplete descriptors, only retrieve the unknown fields. if xt.ExtensionType == nil { return []byte(bo), nil } // If the extension field only exists as unknown fields, unmarshal it. // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. xtd := xt.TypeDescriptor() if !isValidExtension(mr.Descriptor(), xtd) { return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } if !mr.Has(xtd) && len(bo) > 0 { m2 := mr.New() if err := (proto.UnmarshalOptions{ Resolver: extensionResolver{xt}, }.Unmarshal(bo, m2.Interface())); err != nil { return nil, err } if m2.Has(xtd) { mr.Set(xtd, m2.Get(xtd)) clearUnknown(mr, fieldNum(xt.Field)) } } // Check whether the message has the extension field set or a default. var pv protoreflect.Value switch { case mr.Has(xtd): pv = mr.Get(xtd) case xtd.HasDefault(): pv = xtd.Default() default: return nil, ErrMissingExtension } v := xt.InterfaceOf(pv) rv := reflect.ValueOf(v) if isScalarKind(rv.Kind()) { rv2 := reflect.New(rv.Type()) rv2.Elem().Set(rv) v = rv2.Interface() } return v, nil } // extensionResolver is a custom extension resolver that stores a single // extension type that takes precedence over the global registry. 
type extensionResolver struct{ xt protoreflect.ExtensionType } func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { return r.xt, nil } return protoregistry.GlobalTypes.FindExtensionByName(field) } func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { return r.xt, nil } return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } // GetExtensions returns a list of the extensions values present in m, // corresponding with the provided list of extension descriptors, xts. // If an extension is missing in m, the corresponding value is nil. func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() { return nil, errNotExtendable } vs := make([]interface{}, len(xts)) for i, xt := range xts { v, err := GetExtension(m, xt) if err != nil { if err == ErrMissingExtension { continue } return vs, err } vs[i] = v } return vs, nil } // SetExtension sets an extension field in m to the provided value. func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { mr := MessageReflect(m) if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { return errNotExtendable } rv := reflect.ValueOf(v) if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", v, xt.ExtensionType) } if rv.Kind() == reflect.Ptr { if rv.IsNil() { return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) } if isScalarKind(rv.Elem().Kind()) { v = rv.Elem().Interface() } } xtd := xt.TypeDescriptor() if !isValidExtension(mr.Descriptor(), xtd) { return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } mr.Set(xtd, xt.ValueOf(v)) clearUnknown(mr, fieldNum(xt.Field)) return nil } // SetRawExtension inserts b into the unknown fields of m. // // Deprecated: Use Message.ProtoReflect.SetUnknown instead. func SetRawExtension(m Message, fnum int32, b []byte) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() { return } // Verify that the raw field is valid. for b0 := b; len(b0) > 0; { num, _, n := protowire.ConsumeField(b0) if int32(num) != fnum { panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) } b0 = b0[n:] } ClearExtension(m, &ExtensionDesc{Field: fnum}) mr.SetUnknown(append(mr.GetUnknown(), b...)) } // ExtensionDescs returns a list of extension descriptors found in m, // containing descriptors for both populated extension fields in m and // also unknown fields of m that are in the extension range. // For the later case, an type incomplete descriptor is provided where only // the ExtensionDesc.Field field is populated. // The order of the extension descriptors is undefined. func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { mr := MessageReflect(m) if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { return nil, errNotExtendable } // Collect a set of known extension descriptors. 
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { if fd.IsExtension() { xt := fd.(protoreflect.ExtensionTypeDescriptor) if xd, ok := xt.Type().(*ExtensionDesc); ok { extDescs[fd.Number()] = xd } } return true }) // Collect a set of unknown extension descriptors. extRanges := mr.Descriptor().ExtensionRanges() for b := mr.GetUnknown(); len(b) > 0; { num, _, n := protowire.ConsumeField(b) if extRanges.Has(num) && extDescs[num] == nil { extDescs[num] = nil } b = b[n:] } // Transpose the set of descriptors into a list. var xts []*ExtensionDesc for num, xt := range extDescs { if xt == nil { xt = &ExtensionDesc{Field: int32(num)} } xts = append(xts, xt) } return xts, nil } // isValidExtension reports whether xtd is a valid extension descriptor for md. func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } // isScalarKind reports whether k is a protobuf scalar kind (except bytes). // This function exists for historical reasons since the representation of // scalars differs between v1 and v2, where v1 uses *T and v2 uses T. func isScalarKind(k reflect.Kind) bool { switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: return true default: return false } } // clearUnknown removes unknown fields from m where remover.Has reports true. func clearUnknown(m protoreflect.Message, remover interface { Has(protoreflect.FieldNumber) bool }) { var bo protoreflect.RawFields for bi := m.GetUnknown(); len(bi) > 0; { num, _, n := protowire.ConsumeField(bi) if !remover.Has(num) { bo = append(bo, bi[:n]...) 
} bi = bi[n:] } if bi := m.GetUnknown(); len(bi) != len(bo) { m.SetUnknown(bo) } } type fieldNum protoreflect.FieldNumber func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { return protoreflect.FieldNumber(n1) == n2 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/proto/properties.go
vendor/github.com/golang/protobuf/proto/properties.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package proto import ( "fmt" "reflect" "strconv" "strings" "sync" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoimpl" ) // StructProperties represents protocol buffer type information for a // generated protobuf message in the open-struct API. // // Deprecated: Do not use. type StructProperties struct { // Prop are the properties for each field. // // Fields belonging to a oneof are stored in OneofTypes instead, with a // single Properties representing the parent oneof held here. // // The order of Prop matches the order of fields in the Go struct. // Struct fields that are not related to protobufs have a "XXX_" prefix // in the Properties.Name and must be ignored by the user. Prop []*Properties // OneofTypes contains information about the oneof fields in this message. // It is keyed by the protobuf field name. OneofTypes map[string]*OneofProperties } // Properties represents the type information for a protobuf message field. // // Deprecated: Do not use. type Properties struct { // Name is a placeholder name with little meaningful semantic value. // If the name has an "XXX_" prefix, the entire Properties must be ignored. Name string // OrigName is the protobuf field name or oneof name. OrigName string // JSONName is the JSON name for the protobuf field. JSONName string // Enum is a placeholder name for enums. // For historical reasons, this is neither the Go name for the enum, // nor the protobuf name for the enum. Enum string // Deprecated: Do not use. // Weak contains the full name of the weakly referenced message. Weak string // Wire is a string representation of the wire type. Wire string // WireType is the protobuf wire type for the field. WireType int // Tag is the protobuf field number. Tag int // Required reports whether this is a required field. 
Required bool // Optional reports whether this is a optional field. Optional bool // Repeated reports whether this is a repeated field. Repeated bool // Packed reports whether this is a packed repeated field of scalars. Packed bool // Proto3 reports whether this field operates under the proto3 syntax. Proto3 bool // Oneof reports whether this field belongs within a oneof. Oneof bool // Default is the default value in string form. Default string // HasDefault reports whether the field has a default value. HasDefault bool // MapKeyProp is the properties for the key field for a map field. MapKeyProp *Properties // MapValProp is the properties for the value field for a map field. MapValProp *Properties } // OneofProperties represents the type information for a protobuf oneof. // // Deprecated: Do not use. type OneofProperties struct { // Type is a pointer to the generated wrapper type for the field value. // This is nil for messages that are not in the open-struct API. Type reflect.Type // Field is the index into StructProperties.Prop for the containing oneof. Field int // Prop is the properties for the field. Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } if p.Optional { s += ",opt" } if p.Repeated { s += ",rep" } if p.Packed { s += ",packed" } s += ",name=" + p.OrigName if p.JSONName != "" { s += ",json=" + p.JSONName } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } if len(p.Weak) > 0 { s += ",weak=" + p.Weak } if p.Proto3 { s += ",proto3" } if p.Oneof { s += ",oneof" } if p.HasDefault { s += ",def=" + p.Default } return s } // Parse populates p by parsing a string in the protobuf struct field tag style. func (p *Properties) Parse(tag string) { // For example: "bytes,49,opt,name=foo,def=hello!" 
for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { i = len(tag) } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): p.OrigName = s[len("name="):] case strings.HasPrefix(s, "json="): p.JSONName = s[len("json="):] case strings.HasPrefix(s, "enum="): p.Enum = s[len("enum="):] case strings.HasPrefix(s, "weak="): p.Weak = s[len("weak="):] case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) p.Tag = int(n) case s == "opt": p.Optional = true case s == "req": p.Required = true case s == "rep": p.Repeated = true case s == "varint" || s == "zigzag32" || s == "zigzag64": p.Wire = s p.WireType = WireVarint case s == "fixed32": p.Wire = s p.WireType = WireFixed32 case s == "fixed64": p.Wire = s p.WireType = WireFixed64 case s == "bytes": p.Wire = s p.WireType = WireBytes case s == "group": p.Wire = s p.WireType = WireStartGroup case s == "packed": p.Packed = true case s == "proto3": p.Proto3 = true case s == "oneof": p.Oneof = true case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. p.HasDefault = true p.Default, i = tag[len("def="):], len(tag) } tag = strings.TrimPrefix(tag[i:], ",") } } // Init populates the properties from a protocol buffer struct tag. // // Deprecated: Do not use. 
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) if typ != nil && typ.Kind() == reflect.Map { p.MapKeyProp = new(Properties) p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) p.MapValProp = new(Properties) p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) } } var propertiesCache sync.Map // map[reflect.Type]*StructProperties // GetProperties returns the list of properties for the type represented by t, // which must be a generated protocol buffer message in the open-struct API, // where protobuf message fields are represented by exported Go struct fields. // // Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { if p, ok := propertiesCache.Load(t); ok { return p.(*StructProperties) } p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) return p.(*StructProperties) } func newProperties(t reflect.Type) *StructProperties { if t.Kind() != reflect.Struct { panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } var hasOneof bool prop := new(StructProperties) // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { p := new(Properties) f := t.Field(i) tagField := f.Tag.Get("protobuf") p.Init(f.Type, f.Name, tagField, &f) tagOneof := f.Tag.Get("protobuf_oneof") if tagOneof != "" { hasOneof = true p.OrigName = tagOneof } // Rename unrelated struct fields with the "XXX_" prefix since so much // user code simply checks for this to exclude special fields. if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { p.Name = "XXX_" + p.Name p.OrigName = "XXX_" + p.OrigName } else if p.Weak != "" { p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } prop.Prop = append(prop.Prop, p) } // Construct a mapping of oneof field names to properties. 
if hasOneof { var oneofWrappers []interface{} if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) } if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) } if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { oneofWrappers = m.ProtoMessageInfo().OneofWrappers } } prop.OneofTypes = make(map[string]*OneofProperties) for _, wrapper := range oneofWrappers { p := &OneofProperties{ Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } f := p.Type.Elem().Field(0) p.Prop.Name = f.Name p.Prop.Parse(f.Tag.Get("protobuf")) // Determine the struct field that contains this oneof. // Each wrapper is assignable to exactly one parent field. var foundOneof bool for i := 0; i < t.NumField() && !foundOneof; i++ { if p.Type.AssignableTo(t.Field(i).Type) { p.Field = i foundOneof = true } } if !foundOneof { panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } prop.OneofTypes[p.Prop.OrigName] = p } } return prop } func (sp *StructProperties) Len() int { return len(sp.Prop) } func (sp *StructProperties) Less(i, j int) bool { return false } func (sp *StructProperties) Swap(i, j int) { return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/proto/wrappers.go
vendor/github.com/golang/protobuf/proto/wrappers.go
// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package proto // Bool stores v in a new bool value and returns a pointer to it. func Bool(v bool) *bool { return &v } // Int stores v in a new int32 value and returns a pointer to it. // // Deprecated: Use Int32 instead. func Int(v int) *int32 { return Int32(int32(v)) } // Int32 stores v in a new int32 value and returns a pointer to it. func Int32(v int32) *int32 { return &v } // Int64 stores v in a new int64 value and returns a pointer to it. func Int64(v int64) *int64 { return &v } // Uint32 stores v in a new uint32 value and returns a pointer to it. func Uint32(v uint32) *uint32 { return &v } // Uint64 stores v in a new uint64 value and returns a pointer to it. func Uint64(v uint64) *uint64 { return &v } // Float32 stores v in a new float32 value and returns a pointer to it. func Float32(v float32) *float32 { return &v } // Float64 stores v in a new float64 value and returns a pointer to it. func Float64(v float64) *float64 { return &v } // String stores v in a new string value and returns a pointer to it. func String(v string) *string { return &v }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/timestamp.go
vendor/github.com/golang/protobuf/ptypes/timestamp.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ptypes import ( "errors" "fmt" "time" timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) // Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). minValidSeconds = -62135596800 // Seconds field just after the latest valid Timestamp. // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). maxValidSeconds = 253402300800 ) // Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. // // Unlike most Go functions, if Timestamp returns an error, the first return // value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. // // Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time if ts == nil { t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp } else { t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() } return t, validateTimestamp(ts) } // TimestampNow returns a google.protobuf.Timestamp for the current time. // // Deprecated: Call the timestamppb.Now function instead. 
func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") } return ts } // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. // // Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } if err := validateTimestamp(ts); err != nil { return nil, err } return ts, nil } // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. // // Deprecated: Call the ts.AsTime method instead, // followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } // validateTimestamp determines whether a Timestamp is valid. // A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) // and has a Nanos field in the range [0, 1e9). // // If the Timestamp is valid, validateTimestamp returns nil. // Otherwise, it returns an error that describes the problem. // // Every valid Timestamp can be represented by a time.Time, // but the converse is not true. func validateTimestamp(ts *timestamppb.Timestamp) error { if ts == nil { return errors.New("timestamp: nil Timestamp") } if ts.Seconds < minValidSeconds { return fmt.Errorf("timestamp: %v before 0001-01-01", ts) } if ts.Seconds >= maxValidSeconds { return fmt.Errorf("timestamp: %v after 10000-01-01", ts) } if ts.Nanos < 0 || ts.Nanos >= 1e9 { return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/any.go
vendor/github.com/golang/protobuf/ptypes/any.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ptypes import ( "fmt" "strings" "github.com/golang/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" anypb "github.com/golang/protobuf/ptypes/any" ) const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. // // Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err } func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } name := protoreflect.FullName(any.TypeUrl) if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { name = name[i+len("/"):] } if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } return name, nil } // MarshalAny marshals the given message m into an anypb.Any message. // // Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: m = dm.Message case *DynamicAny: if dm == nil { return nil, proto.ErrNil } m = dm.Message } b, err := proto.Marshal(m) if err != nil { return nil, err } return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. // // Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead // to resolve the message name and create a new instance of it. 
func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { return nil, err } mt, err := protoregistry.GlobalTypes.FindMessageByName(name) if err != nil { return nil, err } return proto.MessageV1(mt.New().Interface()), nil } // UnmarshalAny unmarshals the encoded value contained in the anypb.Any message // into the provided message m. It returns an error if the target message // does not match the type in the Any message or if an unmarshal error occurs. // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. // // Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { var err error dm.Message, err = Empty(any) if err != nil { return err } } m = dm.Message } anyName, err := AnyMessageName(any) if err != nil { return err } msgName := proto.MessageName(m) if anyName != msgName { return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } return proto.Unmarshal(any.Value, m) } // Is reports whether the Any message contains a message of the specified type. // // Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false } name := proto.MessageName(m) if !strings.HasSuffix(any.TypeUrl, name) { return false } return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' } // DynamicAny is a value that can be passed to UnmarshalAny to automatically // allocate a proto.Message for the type specified in an anypb.Any message. // The allocated message is stored in the embedded proto.Message. // // Example: // // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} // fmt.Printf("unmarshaled message: %v", x.Message) // // Deprecated: Use the any.UnmarshalNew method instead to unmarshal // the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { if m.Message == nil { return "<nil>" } return m.Message.String() } func (m DynamicAny) Reset() { if m.Message == nil { return } m.Message.Reset() } func (m DynamicAny) ProtoMessage() { return } func (m DynamicAny) ProtoReflect() protoreflect.Message { if m.Message == nil { return nil } return dynamicAny{proto.MessageReflect(m.Message)} } type dynamicAny struct{ protoreflect.Message } func (m dynamicAny) Type() protoreflect.MessageType { return dynamicAnyType{m.Message.Type()} } func (m dynamicAny) New() protoreflect.Message { return dynamicAnyType{m.Message.Type()}.New() } func (m dynamicAny) Interface() protoreflect.ProtoMessage { return DynamicAny{proto.MessageV1(m.Message.Interface())} } type dynamicAnyType struct{ protoreflect.MessageType } func (t dynamicAnyType) New() protoreflect.Message { return dynamicAny{t.MessageType.New()} } func (t dynamicAnyType) Zero() protoreflect.Message { return dynamicAny{t.MessageType.Zero()} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/doc.go
vendor/github.com/golang/protobuf/ptypes/doc.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. // // Deprecated: Well-known types have specialized functionality directly // injected into the generated packages for each message type. // See the deprecation notice for each function for the suggested alternative. package ptypes
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/duration.go
vendor/github.com/golang/protobuf/ptypes/duration.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ptypes import ( "errors" "fmt" "time" durationpb "github.com/golang/protobuf/ptypes/duration" ) // Range of google.protobuf.Duration as specified in duration.proto. // This is about 10,000 years in seconds. const ( maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. // // Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err } d := time.Duration(dur.Seconds) * time.Second if int64(d/time.Second) != dur.Seconds { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } if dur.Nanos != 0 { d += time.Duration(dur.Nanos) * time.Nanosecond if (d < 0) != (dur.Nanos < 0) { return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } // DurationProto converts a time.Duration to a durationpb.Duration. // // Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 return &durationpb.Duration{ Seconds: int64(secs), Nanos: int32(nanos), } } // validateDuration determines whether the durationpb.Duration is valid // according to the definition in google/protobuf/duration.proto. // A valid durpb.Duration may still be too large to fit into a time.Duration // Note that the range of durationpb.Duration is about 10,000 years, // while the range of time.Duration is about 290 years. 
func validateDuration(dur *durationpb.Duration) error { if dur == nil { return errors.New("duration: nil Duration") } if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { return fmt.Errorf("duration: %v: seconds out of range", dur) } if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { return fmt.Errorf("duration: %v: nanos out of range", dur) } // Seconds and Nanos must have the same sign, unless d.Nanos is zero. if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto

package timestamp

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/timestamp.proto.

// Timestamp is a type alias re-exporting timestamppb.Timestamp.
type Timestamp = timestamppb.Timestamp

var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor

// Raw serialized file descriptor for this forwarding proto file.
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}

var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
	// Idempotent: skip if the descriptor was already built.
	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
	// Release build-time inputs so they can be garbage collected.
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto

package duration

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	durationpb "google.golang.org/protobuf/types/known/durationpb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/duration.proto.

// Duration is a type alias re-exporting durationpb.Duration.
type Duration = durationpb.Duration

var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor

// Raw serialized file descriptor for this forwarding proto file.
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
	// Idempotent: skip if the descriptor was already built.
	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
	// Release build-time inputs so they can be garbage collected.
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto

package any

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	anypb "google.golang.org/protobuf/types/known/anypb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/any.proto.

// Any is a type alias re-exporting anypb.Any.
type Any = anypb.Any

var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor

// Raw serialized file descriptor for this forwarding proto file.
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x33,
}

var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
	// Idempotent: skip if the descriptor was already built.
	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
	// Release build-time inputs so they can be garbage collected.
	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/localtime.go
vendor/github.com/pelletier/go-toml/v2/localtime.go
package toml

import (
	"fmt"
	"strings"
	"time"

	"github.com/pelletier/go-toml/v2/unstable"
)

// LocalDate represents a calendar day in no specific timezone.
type LocalDate struct {
	Year  int
	Month int
	Day   int
}

// AsTime converts d into a specific time instance at midnight in zone.
func (d LocalDate) AsTime(zone *time.Location) time.Time {
	return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone)
}

// String returns RFC 3339 representation of d.
func (d LocalDate) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalDate) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalDate) UnmarshalText(b []byte) error {
	parsed, err := parseLocalDate(b)
	if err != nil {
		return err
	}
	*d = parsed
	return nil
}

// LocalTime represents a time of day of no specific day in no specific
// timezone.
type LocalTime struct {
	Hour       int // Hour of the day: [0; 24[
	Minute     int // Minute of the hour: [0; 60[
	Second     int // Second of the minute: [0; 60[
	Nanosecond int // Nanoseconds within the second: [0, 1000000000[
	Precision  int // Number of digits to display for Nanosecond.
}

// String returns RFC 3339 representation of d.
// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond
// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number
// of digits for nanoseconds is provided.
func (d LocalTime) String() string {
	base := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second)
	switch {
	case d.Precision > 0:
		// Truncate the 9-digit fractional part to the requested precision.
		return base + fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1]
	case d.Nanosecond > 0:
		// No explicit precision: strip trailing zeros to emit the minimum
		// number of digits.
		return base + strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0")
	default:
		return base
	}
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalTime) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses b using RFC 3339 to fill d.
func (d *LocalTime) UnmarshalText(b []byte) error {
	parsed, rest, err := parseLocalTime(b)
	if err != nil {
		return err
	}
	if len(rest) > 0 {
		// Trailing bytes after a valid time are an error.
		return unstable.NewParserError(rest, "extra characters")
	}
	*d = parsed
	return nil
}

// LocalDateTime represents a time of a specific day in no specific timezone.
type LocalDateTime struct {
	LocalDate
	LocalTime
}

// AsTime converts d into a specific time instance in zone.
func (d LocalDateTime) AsTime(zone *time.Location) time.Time {
	return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone)
}

// String returns RFC 3339 representation of d.
func (d LocalDateTime) String() string {
	return d.LocalDate.String() + "T" + d.LocalTime.String()
}

// MarshalText returns RFC 3339 representation of d.
func (d LocalDateTime) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText parses data using RFC 3339 to fill d.
func (d *LocalDateTime) UnmarshalText(data []byte) error {
	parsed, rest, err := parseLocalDateTime(data)
	if err != nil {
		return err
	}
	if len(rest) > 0 {
		// Trailing bytes after a valid date-time are an error.
		return unstable.NewParserError(rest, "extra characters")
	}
	*d = parsed
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/types.go
vendor/github.com/pelletier/go-toml/v2/types.go
package toml

import (
	"encoding"
	"reflect"
	"time"
)

// Frequently compared reflect.Type values, computed once at package
// initialization so the encoder/decoder can test against them cheaply.
var (
	timeType               = reflect.TypeOf((*time.Time)(nil)).Elem()
	textMarshalerType      = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	textUnmarshalerType    = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
	mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil))
	sliceInterfaceType     = reflect.TypeOf([]interface{}(nil))
	stringType             = reflect.TypeOf("")
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/marshaler.go
vendor/github.com/pelletier/go-toml/v2/marshaler.go
package toml

import (
	"bytes"
	"encoding"
	"encoding/json"
	"fmt"
	"io"
	"math"
	"reflect"
	"slices"
	"strconv"
	"strings"
	"time"
	"unicode"

	"github.com/pelletier/go-toml/v2/internal/characters"
)

// Marshal serializes a Go value as a TOML document.
//
// It is a shortcut for Encoder.Encode() with the default options.
func Marshal(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	if err := NewEncoder(&buf).Encode(v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// Encoder writes a TOML document to an output stream.
type Encoder struct {
	// destination stream
	w io.Writer

	// global settings
	tablesInline       bool
	arraysMultiline    bool
	indentSymbol       string
	indentTables       bool
	marshalJsonNumbers bool
}

// NewEncoder returns a new Encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		w:            w,
		indentSymbol: "  ",
	}
}

// SetTablesInline forces the encoder to emit all tables inline.
//
// This behavior can be controlled on an individual struct field basis with the
// inline tag:
//
//	MyField `toml:",inline"`
func (enc *Encoder) SetTablesInline(inline bool) *Encoder {
	enc.tablesInline = inline
	return enc
}

// SetArraysMultiline forces the encoder to emit all arrays with one element per
// line.
//
// This behavior can be controlled on an individual struct field basis with the multiline tag:
//
//	MyField `multiline:"true"`
func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder {
	enc.arraysMultiline = multiline
	return enc
}

// SetIndentSymbol defines the string that should be used for indentation. The
// provided string is repeated for each indentation level. Defaults to two
// spaces.
func (enc *Encoder) SetIndentSymbol(s string) *Encoder {
	enc.indentSymbol = s
	return enc
}

// SetIndentTables forces the encoder to indent tables and array tables.
func (enc *Encoder) SetIndentTables(indent bool) *Encoder { enc.indentTables = indent return enc } // SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a // float or integer instead of relying on TextMarshaler to emit a string. // // *Unstable:* This method does not follow the compatibility guarantees of // semver. It can be changed or removed without a new major version being // issued. func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { enc.marshalJsonNumbers = indent return enc } // Encode writes a TOML representation of v to the stream. // // If v cannot be represented to TOML it returns an error. // // # Encoding rules // // A top level slice containing only maps or structs is encoded as [[table // array]]. // // All slices not matching rule 1 are encoded as [array]. As a result, any map // or struct they contain is encoded as an {inline table}. // // Nil interfaces and nil pointers are not supported. // // Keys in key-values always have one part. // // Intermediate tables are always printed. // // By default, strings are encoded as literal string, unless they contain either // a newline character or a single quote. In that case they are emitted as // quoted strings. // // Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so // results in an error. This rule exists because the TOML specification only // requires parsers to support at least the 64 bits integer range. Allowing // larger numbers would create non-standard TOML documents, which may not be // readable (at best) by other implementations. To encode such numbers, a // solution is a custom type that implements encoding.TextMarshaler. // // When encoding structs, fields are encoded in order of definition, with their // exact name. // // Tables and array tables are separated by empty lines. However, consecutive // subtables definitions are not. 
For example: // // [top1] // // [top2] // [top2.child1] // // [[array]] // // [[array]] // [array.child2] // // # Struct tags // // The encoding of each public struct field can be customized by the format // string in the "toml" key of the struct field's tag. This follows // encoding/json's convention. The format string starts with the name of the // field, optionally followed by a comma-separated list of options. The name may // be empty in order to provide options without overriding the default name. // // The "multiline" option emits strings as quoted multi-line TOML strings. It // has no effect on fields that would not be encoded as strings. // // The "inline" option turns fields that would be emitted as tables into inline // tables instead. It has no effect on other fields. // // The "omitempty" option prevents empty values or groups from being emitted. // // The "commented" option prefixes the value and all its children with a comment // symbol. // // In addition to the "toml" tag struct tag, a "comment" tag can be used to emit // a TOML comment before the value being annotated. Comments are ignored inside // inline tables. For array tables, the comment is only present before the first // element of the array. func (enc *Encoder) Encode(v interface{}) error { var ( b []byte ctx encoderCtx ) ctx.inline = enc.tablesInline if v == nil { return fmt.Errorf("toml: cannot encode a nil interface") } b, err := enc.encode(b, ctx, reflect.ValueOf(v)) if err != nil { return err } _, err = enc.w.Write(b) if err != nil { return fmt.Errorf("toml: cannot write: %w", err) } return nil } type valueOptions struct { multiline bool omitempty bool commented bool comment string } type encoderCtx struct { // Current top-level key. parentKey []string // Key that should be used for a KV. key string // Extra flag to account for the empty string hasKey bool // Set to true to indicate that the encoder is inside a KV, so that all // tables need to be inlined. 
insideKv bool // Set to true to skip the first table header in an array table. skipTableHeader bool // Should the next table be encoded as inline inline bool // Indentation level indent int // Prefix the current value with a comment. commented bool // Options coming from struct tags options valueOptions } func (ctx *encoderCtx) shiftKey() { if ctx.hasKey { ctx.parentKey = append(ctx.parentKey, ctx.key) ctx.clearKey() } } func (ctx *encoderCtx) setKey(k string) { ctx.key = k ctx.hasKey = true } func (ctx *encoderCtx) clearKey() { ctx.key = "" ctx.hasKey = false } func (ctx *encoderCtx) isRoot() bool { return len(ctx.parentKey) == 0 && !ctx.hasKey } func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { i := v.Interface() switch x := i.(type) { case time.Time: if x.Nanosecond() > 0 { return x.AppendFormat(b, time.RFC3339Nano), nil } return x.AppendFormat(b, time.RFC3339), nil case LocalTime: return append(b, x.String()...), nil case LocalDate: return append(b, x.String()...), nil case LocalDateTime: return append(b, x.String()...), nil case json.Number: if enc.marshalJsonNumbers { if x == "" { /// Useful zero value. 
return append(b, "0"...), nil } else if v, err := x.Int64(); err == nil { return enc.encode(b, ctx, reflect.ValueOf(v)) } else if f, err := x.Float64(); err == nil { return enc.encode(b, ctx, reflect.ValueOf(f)) } else { return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) } } } hasTextMarshaler := v.Type().Implements(textMarshalerType) if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } if ctx.isRoot() { return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) } text, err := v.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return nil, err } b = enc.encodeString(b, string(text), ctx.options) return b, nil } switch v.Kind() { // containers case reflect.Map: return enc.encodeMap(b, ctx, v) case reflect.Struct: return enc.encodeStruct(b, ctx, v) case reflect.Slice, reflect.Array: return enc.encodeSlice(b, ctx, v) case reflect.Interface: if v.IsNil() { return nil, fmt.Errorf("toml: encoding a nil interface is not supported") } return enc.encode(b, ctx, v.Elem()) case reflect.Ptr: if v.IsNil() { return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) } return enc.encode(b, ctx, v.Elem()) // values case reflect.String: b = enc.encodeString(b, v.String(), ctx.options) case reflect.Float32: f := v.Float() if math.IsNaN(f) { b = append(b, "nan"...) } else if f > math.MaxFloat32 { b = append(b, "inf"...) } else if f < -math.MaxFloat32 { b = append(b, "-inf"...) } else if math.Trunc(f) == f { b = strconv.AppendFloat(b, f, 'f', 1, 32) } else { b = strconv.AppendFloat(b, f, 'f', -1, 32) } case reflect.Float64: f := v.Float() if math.IsNaN(f) { b = append(b, "nan"...) } else if f > math.MaxFloat64 { b = append(b, "inf"...) } else if f < -math.MaxFloat64 { b = append(b, "-inf"...) 
} else if math.Trunc(f) == f { b = strconv.AppendFloat(b, f, 'f', 1, 64) } else { b = strconv.AppendFloat(b, f, 'f', -1, 64) } case reflect.Bool: if v.Bool() { b = append(b, "true"...) } else { b = append(b, "false"...) } case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: x := v.Uint() if x > uint64(math.MaxInt64) { return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) } b = strconv.AppendUint(b, x, 10) case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: b = strconv.AppendInt(b, v.Int(), 10) default: return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) } return b, nil } func isNil(v reflect.Value) bool { switch v.Kind() { case reflect.Ptr, reflect.Interface, reflect.Map: return v.IsNil() default: return false } } func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { return options.omitempty && isEmptyValue(v) } func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { var err error if !ctx.inline { b = enc.encodeComment(ctx.indent, options.comment, b) b = enc.commented(ctx.commented, b) b = enc.indent(ctx.indent, b) } b = enc.encodeKey(b, ctx.key) b = append(b, " = "...) // create a copy of the context because the value of a KV shouldn't // modify the global context. subctx := ctx subctx.insideKv = true subctx.shiftKey() subctx.options = options b, err = enc.encode(b, subctx, v) if err != nil { return nil, err } return b, nil } func (enc *Encoder) commented(commented bool, b []byte) []byte { if commented { return append(b, "# "...) 
} return b } func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Struct: return isEmptyStruct(v) case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func isEmptyStruct(v reflect.Value) bool { // TODO: merge with walkStruct and cache. typ := v.Type() for i := 0; i < typ.NumField(); i++ { fieldType := typ.Field(i) // only consider exported fields if fieldType.PkgPath != "" { continue } tag := fieldType.Tag.Get("toml") // special field name to skip field if tag == "-" { continue } f := v.Field(i) if !isEmptyValue(f) { return false } } return true } const literalQuote = '\'' func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { if needsQuoting(v) { return enc.encodeQuotedString(options.multiline, b, v) } return enc.encodeLiteralString(b, v) } func needsQuoting(v string) bool { // TODO: vectorize for _, b := range []byte(v) { if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { return true } } return false } // caller should have checked that the string does not contain new lines or ' . func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { b = append(b, literalQuote) b = append(b, v...) b = append(b, literalQuote) return b } func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { stringQuote := `"` if multiline { stringQuote = `"""` } b = append(b, stringQuote...) 
if multiline { b = append(b, '\n') } const ( hextable = "0123456789ABCDEF" // U+0000 to U+0008, U+000A to U+001F, U+007F nul = 0x0 bs = 0x8 lf = 0xa us = 0x1f del = 0x7f ) for _, r := range []byte(v) { switch r { case '\\': b = append(b, `\\`...) case '"': b = append(b, `\"`...) case '\b': b = append(b, `\b`...) case '\f': b = append(b, `\f`...) case '\n': if multiline { b = append(b, r) } else { b = append(b, `\n`...) } case '\r': b = append(b, `\r`...) case '\t': b = append(b, `\t`...) default: switch { case r >= nul && r <= bs, r >= lf && r <= us, r == del: b = append(b, `\u00`...) b = append(b, hextable[r>>4]) b = append(b, hextable[r&0x0f]) default: b = append(b, r) } } } b = append(b, stringQuote...) return b } // caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { return append(b, v...) } func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { if len(ctx.parentKey) == 0 { return b, nil } b = enc.encodeComment(ctx.indent, ctx.options.comment, b) b = enc.commented(ctx.commented, b) b = enc.indent(ctx.indent, b) b = append(b, '[') b = enc.encodeKey(b, ctx.parentKey[0]) for _, k := range ctx.parentKey[1:] { b = append(b, '.') b = enc.encodeKey(b, k) } b = append(b, "]\n"...) return b, nil } //nolint:cyclop func (enc *Encoder) encodeKey(b []byte, k string) []byte { needsQuotation := false cannotUseLiteral := false if len(k) == 0 { return append(b, "''"...) 
} for _, c := range k { if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { continue } if c == literalQuote { cannotUseLiteral = true } needsQuotation = true } if needsQuotation && needsQuoting(k) { cannotUseLiteral = true } switch { case cannotUseLiteral: return enc.encodeQuotedString(false, b, k) case needsQuotation: return enc.encodeLiteralString(b, k) default: return enc.encodeUnquotedKey(b, k) } } func (enc *Encoder) keyToString(k reflect.Value) (string, error) { keyType := k.Type() switch { case keyType.Kind() == reflect.String: return k.String(), nil case keyType.Implements(textMarshalerType): keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: return strconv.FormatInt(k.Int(), 10), nil case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: return strconv.FormatUint(k.Uint(), 10), nil case keyType.Kind() == reflect.Float32: return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil case keyType.Kind() == reflect.Float64: return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { var ( t table emptyValueOptions valueOptions ) iter := v.MapRange() for iter.Next() { v := iter.Value() if isNil(v) { continue } k, err := enc.keyToString(iter.Key()) if err != nil { return nil, err } if willConvertToTableOrArrayTable(ctx, v) { t.pushTable(k, v, emptyValueOptions) } else { t.pushKV(k, v, emptyValueOptions) } } 
sortEntriesByKey(t.kvs) sortEntriesByKey(t.tables) return enc.encodeTable(b, ctx, t) } func sortEntriesByKey(e []entry) { slices.SortFunc(e, func(a, b entry) int { return strings.Compare(a.Key, b.Key) }) } type entry struct { Key string Value reflect.Value Options valueOptions } type table struct { kvs []entry tables []entry } func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { for _, e := range t.kvs { if e.Key == k { return } } t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) } func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { for _, e := range t.tables { if e.Key == k { return } } t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) } func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { // TODO: cache this typ := v.Type() for i := 0; i < typ.NumField(); i++ { fieldType := typ.Field(i) // only consider exported fields if fieldType.PkgPath != "" { continue } tag := fieldType.Tag.Get("toml") // special field name to skip field if tag == "-" { continue } k, opts := parseTag(tag) if !isValidName(k) { k = "" } f := v.Field(i) if k == "" { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { walkStruct(ctx, t, f.Elem()) } continue } else { k = fieldType.Name } } if isNil(f) { continue } options := valueOptions{ multiline: opts.multiline, omitempty: opts.omitempty, commented: opts.commented, comment: fieldType.Tag.Get("comment"), } if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { t.pushKV(k, f, options) } else { t.pushTable(k, f, options) } } } func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { var t table walkStruct(ctx, &t, v) return enc.encodeTable(b, ctx, t) } func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { for len(comment) > 0 { var line string idx := 
strings.IndexByte(comment, '\n') if idx >= 0 { line = comment[:idx] comment = comment[idx+1:] } else { line = comment comment = "" } b = enc.indent(indent, b) b = append(b, "# "...) b = append(b, line...) b = append(b, '\n') } return b } func isValidName(s string) bool { if s == "" { return false } for _, c := range s { switch { case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. case !unicode.IsLetter(c) && !unicode.IsDigit(c): return false } } return true } type tagOptions struct { multiline bool inline bool omitempty bool commented bool } func parseTag(tag string) (string, tagOptions) { opts := tagOptions{} idx := strings.Index(tag, ",") if idx == -1 { return tag, opts } raw := tag[idx+1:] tag = string(tag[:idx]) for raw != "" { var o string i := strings.Index(raw, ",") if i >= 0 { o, raw = raw[:i], raw[i+1:] } else { o, raw = raw, "" } switch o { case "multiline": opts.multiline = true case "inline": opts.inline = true case "omitempty": opts.omitempty = true case "commented": opts.commented = true } } return tag, opts } func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { var err error ctx.shiftKey() if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { return enc.encodeTableInline(b, ctx, t) } if !ctx.skipTableHeader { b, err = enc.encodeTableHeader(ctx, b) if err != nil { return nil, err } if enc.indentTables && len(ctx.parentKey) > 0 { ctx.indent++ } } ctx.skipTableHeader = false hasNonEmptyKV := false for _, kv := range t.kvs { if shouldOmitEmpty(kv.Options, kv.Value) { continue } hasNonEmptyKV = true ctx.setKey(kv.Key) ctx2 := ctx ctx2.commented = kv.Options.commented || ctx2.commented b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) if err != nil { return nil, err } b = append(b, '\n') } first := true for _, table := range t.tables { if shouldOmitEmpty(table.Options, table.Value) { continue } if first { 
first = false if hasNonEmptyKV { b = append(b, '\n') } } else { b = append(b, "\n"...) } ctx.setKey(table.Key) ctx.options = table.Options ctx2 := ctx ctx2.commented = ctx2.commented || ctx.options.commented b, err = enc.encode(b, ctx2, table.Value) if err != nil { return nil, err } } return b, nil } func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { var err error b = append(b, '{') first := true for _, kv := range t.kvs { if shouldOmitEmpty(kv.Options, kv.Value) { continue } if first { first = false } else { b = append(b, `, `...) } ctx.setKey(kv.Key) b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) if err != nil { return nil, err } } if len(t.tables) > 0 { panic("inline table cannot contain nested tables, only key-values") } b = append(b, "}"...) return b, nil } func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } t := v.Type() switch t.Kind() { case reflect.Map, reflect.Struct: return !ctx.inline case reflect.Interface: return willConvertToTable(ctx, v.Elem()) case reflect.Ptr: if v.IsNil() { return false } return willConvertToTable(ctx, v.Elem()) default: return false } } func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { if ctx.insideKv { return false } t := v.Type() if t.Kind() == reflect.Interface { return willConvertToTableOrArrayTable(ctx, v.Elem()) } if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { if v.Len() == 0 { // An empty slice should be a kv = []. return false } for i := 0; i < v.Len(); i++ { t := willConvertToTable(ctx, v.Index(i)) if !t { return false } } return true } return willConvertToTable(ctx, v) } func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { if v.Len() == 0 { b = append(b, "[]"...) 
return b, nil } if willConvertToTableOrArrayTable(ctx, v) { return enc.encodeSliceAsArrayTable(b, ctx, v) } return enc.encodeSliceAsArray(b, ctx, v) } // caller should have checked that v is a slice that only contains values that // encode into tables. func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { ctx.shiftKey() scratch := make([]byte, 0, 64) scratch = enc.commented(ctx.commented, scratch) if enc.indentTables { scratch = enc.indent(ctx.indent, scratch) } scratch = append(scratch, "[["...) for i, k := range ctx.parentKey { if i > 0 { scratch = append(scratch, '.') } scratch = enc.encodeKey(scratch, k) } scratch = append(scratch, "]]\n"...) ctx.skipTableHeader = true b = enc.encodeComment(ctx.indent, ctx.options.comment, b) if enc.indentTables { ctx.indent++ } for i := 0; i < v.Len(); i++ { if i != 0 { b = append(b, "\n"...) } b = append(b, scratch...) var err error b, err = enc.encode(b, ctx, v.Index(i)) if err != nil { return nil, err } } return b, nil } func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { multiline := ctx.options.multiline || enc.arraysMultiline separator := ", " b = append(b, '[') subCtx := ctx subCtx.options = valueOptions{} if multiline { separator = ",\n" b = append(b, '\n') subCtx.indent++ } var err error first := true for i := 0; i < v.Len(); i++ { if first { first = false } else { b = append(b, separator...) } if multiline { b = enc.indent(subCtx.indent, b) } b, err = enc.encode(b, subCtx, v.Index(i)) if err != nil { return nil, err } } if multiline { b = append(b, '\n') b = enc.indent(ctx.indent, b) } b = append(b, ']') return b, nil } func (enc *Encoder) indent(level int, b []byte) []byte { for i := 0; i < level; i++ { b = append(b, enc.indentSymbol...) } return b }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/errors.go
vendor/github.com/pelletier/go-toml/v2/errors.go
package toml

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/pelletier/go-toml/v2/internal/danger"
	"github.com/pelletier/go-toml/v2/unstable"
)

// DecodeError represents an error encountered during the parsing or decoding
// of a TOML document.
//
// In addition to the error message, it contains the position in the document
// where it happened, as well as a human-readable representation that shows
// where the error occurred in the document.
type DecodeError struct {
	message string // bare error message, without the "toml: " prefix
	line    int    // 1-indexed line of the error in the document
	column  int    // 1-indexed column of the error in the document
	key     Key    // key being processed; set for strict-mode errors

	human string // multi-line, contextualized rendering built by wrapDecodeError
}

// StrictMissingError occurs in a TOML document that does not have a
// corresponding field in the target value. It contains all the missing fields
// in Errors.
//
// Emitted by Decoder when DisallowUnknownFields() was called.
type StrictMissingError struct {
	// One error per field that could not be found.
	Errors []DecodeError
}

// Error returns the canonical string for this error.
func (s *StrictMissingError) Error() string {
	return "strict mode: fields in the document are missing in the target struct"
}

// String returns a human readable description of all errors.
func (s *StrictMissingError) String() string {
	var buf strings.Builder
	for i, e := range s.Errors {
		if i > 0 {
			// Separate consecutive error renderings.
			buf.WriteString("\n---\n")
		}
		buf.WriteString(e.String())
	}
	return buf.String()
}

// Key represents a TOML key as its sequence of dotted parts.
type Key []string

// Error returns the error message contained in the DecodeError.
func (e *DecodeError) Error() string {
	return "toml: " + e.message
}

// String returns the human-readable contextualized error. This string is multi-line.
func (e *DecodeError) String() string {
	return e.human
}

// Position returns the (line, column) pair indicating where the error
// occurred in the document. Positions are 1-indexed.
func (e *DecodeError) Position() (row int, column int) {
	return e.line, e.column
}

// Key returns the key that was being processed when the error occurred. The
// key is present only if this DecodeError is part of a StrictMissingError.
func (e *DecodeError) Key() Key {
	return e.key
}

// decodeErrorFromHighlight creates a DecodeError referencing a highlighted
// range of bytes from document.
//
// highlight needs to be a sub-slice of document, or this function panics.
//
// The function copies all bytes used in DecodeError, so that document and
// highlight can be freely deallocated.
//
//nolint:funlen
func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError {
	offset := danger.SubsliceOffset(document, de.Highlight)

	errMessage := de.Error()
	// Position of the start of the highlight within the document.
	errLine, errColumn := positionAtEnd(document[:offset])
	before, after := linesOfContext(document, de.Highlight, offset, 3)

	var buf strings.Builder

	// Width of the line-number gutter is driven by the largest line printed.
	maxLine := errLine + len(after) - 1
	lineColumnWidth := len(strconv.Itoa(maxLine))

	// Write the lines of context strictly before the error.
	// before[0] is the start of the error line itself, so it is skipped here.
	for i := len(before) - 1; i > 0; i-- {
		line := errLine - i
		buf.WriteString(formatLineNumber(line, lineColumnWidth))
		buf.WriteString("|")

		if len(before[i]) > 0 {
			buf.WriteString(" ")
			buf.Write(before[i])
		}

		buf.WriteRune('\n')
	}

	// Write the document line that contains the error.

	buf.WriteString(formatLineNumber(errLine, lineColumnWidth))
	buf.WriteString("| ")

	if len(before) > 0 {
		buf.Write(before[0])
	}

	buf.Write(de.Highlight)

	if len(after) > 0 {
		buf.Write(after[0])
	}

	buf.WriteRune('\n')

	// Write the line with the error message itself (so it does not have a line
	// number).

	buf.WriteString(strings.Repeat(" ", lineColumnWidth))
	buf.WriteString("| ")

	if len(before) > 0 {
		// Pad so the carets line up under the highlighted bytes.
		buf.WriteString(strings.Repeat(" ", len(before[0])))
	}

	buf.WriteString(strings.Repeat("~", len(de.Highlight)))

	if len(errMessage) > 0 {
		buf.WriteString(" ")
		buf.WriteString(errMessage)
	}

	// Write the lines of context strictly after the error.
	// after[0] is the remainder of the error line itself, so it is skipped.
	for i := 1; i < len(after); i++ {
		buf.WriteRune('\n')
		line := errLine + i
		buf.WriteString(formatLineNumber(line, lineColumnWidth))
		buf.WriteString("|")

		if len(after[i]) > 0 {
			buf.WriteString(" ")
			buf.Write(after[i])
		}
	}

	return &DecodeError{
		message: errMessage,
		line:    errLine,
		column:  errColumn,
		key:     de.Key,
		human:   buf.String(),
	}
}

// formatLineNumber renders line right-aligned in a field of the given width.
func formatLineNumber(line int, width int) string {
	format := "%" + strconv.Itoa(width) + "d"

	return fmt.Sprintf(format, line)
}

// linesOfContext returns the lines of document surrounding the highlight:
// lines before (in reverse order, starting at the error line) and lines after.
func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) {
	return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround)
}

// beforeLines collects up to linesAround lines preceding offset, plus the
// beginning of the line containing offset. Lines are appended in reverse
// document order (closest line first).
func beforeLines(document []byte, offset int, linesAround int) [][]byte {
	var beforeLines [][]byte

	// Walk the document backward from the highlight to find previous lines
	// of context.
	rest := document[:offset]
backward:
	for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; {
		switch {
		case rest[o] == '\n':
			// handle individual lines
			beforeLines = append(beforeLines, rest[o+1:])
			rest = rest[:o]
			o = len(rest) - 1
		case o == 0:
			// add the first line only if it's non-empty
			beforeLines = append(beforeLines, rest)

			break backward
		default:
			o--
		}
	}

	return beforeLines
}

// afterLines collects the end of the line containing the highlight, plus up to
// linesAround following lines, in document order.
func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte {
	var afterLines [][]byte

	// Walk the document forward from the highlight to find the following
	// lines of context.
	rest := document[offset+len(highlight):]
forward:
	for o := 0; o < len(rest) && len(afterLines) <= linesAround; {
		switch {
		case rest[o] == '\n':
			// handle individual lines
			afterLines = append(afterLines, rest[:o])
			rest = rest[o+1:]
			o = 0
		case o == len(rest)-1:
			// add last line only if it's non-empty
			afterLines = append(afterLines, rest)

			break forward
		default:
			o++
		}
	}

	return afterLines
}

// positionAtEnd returns the 1-indexed (row, column) of the position
// immediately after the last byte of b.
func positionAtEnd(b []byte) (row int, column int) {
	row = 1
	column = 1

	for _, c := range b {
		if c == '\n' {
			row++
			column = 1
		} else {
			column++
		}
	}

	return
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/doc.go
vendor/github.com/pelletier/go-toml/v2/doc.go
// Package toml is a library to read and write TOML documents.
package toml
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
vendor/github.com/pelletier/go-toml/v2/unmarshaler.go
package toml

import (
	"encoding"
	"errors"
	"fmt"
	"io"
	"math"
	"reflect"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/pelletier/go-toml/v2/internal/danger"
	"github.com/pelletier/go-toml/v2/internal/tracker"
	"github.com/pelletier/go-toml/v2/unstable"
)

// Unmarshal deserializes a TOML document into a Go value.
//
// It is a shortcut for Decoder.Decode() with the default options.
func Unmarshal(data []byte, v interface{}) error {
	d := decoder{}
	d.p.Reset(data)
	return d.FromParser(v)
}

// Decoder reads and decode a TOML document from an input stream.
type Decoder struct {
	// input
	r io.Reader

	// global settings
	strict bool

	// toggles unmarshaler interface
	unmarshalerInterface bool
}

// NewDecoder creates a new Decoder that will read from r.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// DisallowUnknownFields causes the Decoder to return an error when the
// destination is a struct and the input contains a key that does not match a
// non-ignored field.
//
// In that case, the Decoder returns a StrictMissingError that can be used to
// retrieve the individual errors as well as generate a human readable
// description of the missing fields.
func (d *Decoder) DisallowUnknownFields() *Decoder {
	d.strict = true
	return d
}

// EnableUnmarshalerInterface allows to enable unmarshaler interface.
//
// With this feature enabled, types implementing the unstable/Unmarshaler
// interface can be decoded from any structure of the document. It allows types
// that don't have a straightfoward TOML representation to provide their own
// decoding logic.
//
// Currently, types can only decode from a single value. Tables and array tables
// are not supported.
//
// *Unstable:* This method does not follow the compatibility guarantees of
// semver. It can be changed or removed without a new major version being
// issued.
func (d *Decoder) EnableUnmarshalerInterface() *Decoder {
	d.unmarshalerInterface = true
	return d
}

// Decode the whole content of r into v.
//
// By default, values in the document that don't exist in the target Go value
// are ignored. See Decoder.DisallowUnknownFields() to change this behavior.
//
// When a TOML local date, time, or date-time is decoded into a time.Time, its
// value is represented in time.Local timezone. Otherwise the appropriate Local*
// structure is used. For time values, precision up to the nanosecond is
// supported by truncating extra digits.
//
// Empty tables decoded in an interface{} create an empty initialized
// map[string]interface{}.
//
// Types implementing the encoding.TextUnmarshaler interface are decoded from a
// TOML string.
//
// When decoding a number, go-toml will return an error if the number is out of
// bounds for the target type (which includes negative numbers when decoding
// into an unsigned int).
//
// If an error occurs while decoding the content of the document, this function
// returns a toml.DecodeError, providing context about the issue. When using
// strict mode and a field is missing, a `toml.StrictMissingError` is
// returned. In any other case, this function returns a standard Go error.
//
// # Type mapping
//
// List of supported TOML types and their associated accepted Go types:
//
//	String           -> string
//	Integer          -> uint*, int*, depending on size
//	Float            -> float*, depending on size
//	Boolean          -> bool
//	Offset Date-Time -> time.Time
//	Local Date-time  -> LocalDateTime, time.Time
//	Local Date       -> LocalDate, time.Time
//	Local Time       -> LocalTime, time.Time
//	Array            -> slice and array, depending on elements types
//	Table            -> map and struct
//	Inline Table     -> same as Table
//	Array of Tables  -> same as Array and Table
func (d *Decoder) Decode(v interface{}) error {
	// Read the whole document up front; the parser operates on a byte slice.
	b, err := io.ReadAll(d.r)
	if err != nil {
		return fmt.Errorf("toml: %w", err)
	}

	// Build a one-shot internal decoder configured from the exported Decoder's
	// settings.
	dec := decoder{
		strict: strict{
			Enabled: d.strict,
		},
		unmarshalerInterface: d.unmarshalerInterface,
	}
	dec.p.Reset(b)

	return dec.FromParser(v)
}

// decoder holds all mutable state for one decoding session.
type decoder struct {
	// Which parser instance in use for this decoding session.
	p unstable.Parser

	// Flag indicating that the current expression is stashed.
	// If set to true, calling nextExpr will not actually pull a new expression
	// but turn off the flag instead.
	stashedExpr bool

	// Skip expressions until a table is found. This is set to true when a
	// table could not be created (missing field in map), so all KV expressions
	// need to be skipped.
	skipUntilTable bool

	// Flag indicating that the current array/slice table should be cleared because
	// it is the first encounter of an array table.
	clearArrayTable bool

	// Tracks position in Go arrays.
	// This is used when decoding [[array tables]] into Go arrays. Given array
	// tables are separate TOML expression, we need to keep track of where we
	// are at in the Go array, as we can't just introspect its size.
	arrayIndexes map[reflect.Value]int

	// Tracks keys that have been seen, with which type.
	seen tracker.SeenTracker

	// Strict mode
	strict strict

	// Flag that enables/disables unmarshaler interface.
	unmarshalerInterface bool

	// Current context for the error.
	errorContext *errorContext
}

// errorContext records which struct field is currently being decoded so that
// type-mismatch errors can name it.
type errorContext struct {
	Struct reflect.Type
	Field  []int
}

// typeMismatchError wraps typeMismatchString in an error with the "toml:"
// prefix.
func (d *decoder) typeMismatchError(toml string, target reflect.Type) error {
	return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target))
}

// typeMismatchString builds a human-readable message for a TOML-kind /
// Go-type mismatch, naming the struct field when errorContext is set.
func (d *decoder) typeMismatchString(toml string, target reflect.Type) string {
	if d.errorContext != nil && d.errorContext.Struct != nil {
		ctx := d.errorContext
		f := ctx.Struct.FieldByIndex(ctx.Field)
		return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type)
	}
	return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target)
}

// expr returns the expression the parser is currently positioned on.
func (d *decoder) expr() *unstable.Node {
	return d.p.Expression()
}

// nextExpr advances to the next expression, honoring a stashed expression
// (see stashExpr): when one is stashed, the parser is not advanced.
func (d *decoder) nextExpr() bool {
	if d.stashedExpr {
		d.stashedExpr = false
		return true
	}
	return d.p.NextExpression()
}

// stashExpr marks the current expression so the next nextExpr call re-yields
// it instead of advancing the parser.
func (d *decoder) stashExpr() {
	d.stashedExpr = true
}

// arrayIndex returns the current index into the Go array v when decoding
// [[array tables]]. When shouldAppend is true the index is advanced first.
func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int {
	if d.arrayIndexes == nil {
		d.arrayIndexes = make(map[reflect.Value]int, 1)
	}

	idx, ok := d.arrayIndexes[v]

	if !ok {
		d.arrayIndexes[v] = 0
	} else if shouldAppend {
		idx++
		d.arrayIndexes[v] = idx
	}

	return idx
}

// FromParser decodes the document held by d.p into v, which must be a
// non-nil pointer. Parser errors are wrapped into DecodeError values.
func (d *decoder) FromParser(v interface{}) error {
	r := reflect.ValueOf(v)
	if r.Kind() != reflect.Ptr {
		return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind())
	}

	if r.IsNil() {
		return fmt.Errorf("toml: decoding pointer target cannot be nil")
	}

	r = r.Elem()
	// A nil interface target gets an empty map so the document has somewhere
	// to decode into.
	if r.Kind() == reflect.Interface && r.IsNil() {
		newMap := map[string]interface{}{}
		r.Set(reflect.ValueOf(newMap))
	}

	err := d.fromParser(r)
	if err == nil {
		// On success, strict mode may still report missing fields.
		return d.strict.Error(d.p.Data())
	}

	var e *unstable.ParserError
	if errors.As(err, &e) {
		return wrapDecodeError(d.p.Data(), e)
	}

	return err
}

// fromParser drains every root expression from the parser into root.
func (d *decoder) fromParser(root reflect.Value) error {
	for d.nextExpr() {
		err := d.handleRootExpression(d.expr(), root)
		if err != nil {
			return err
		}
	}

	return d.p.Error()
}

/*
Rules for the unmarshal code:

- The stack is used to keep track of which
values need to be set where.
- handle* functions <=> switch on a given unstable.Kind.
- unmarshalX* functions need to unmarshal a node of kind X.
- An "object" is either a struct or a map.
*/

// handleRootExpression dispatches one top-level expression (key-value, table,
// or array table) into v, updating strict-mode bookkeeping along the way.
func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error {
	var x reflect.Value
	var err error
	var first bool // used to clear array tables on their first use

	// Key-values being skipped (skipUntilTable) are not recorded by the seen
	// tracker.
	if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) {
		first, err = d.seen.CheckExpression(expr)
		if err != nil {
			return err
		}
	}

	switch expr.Kind {
	case unstable.KeyValue:
		if d.skipUntilTable {
			return nil
		}
		x, err = d.handleKeyValue(expr, v)
	case unstable.Table:
		d.skipUntilTable = false
		d.strict.EnterTable(expr)
		x, err = d.handleTable(expr.Key(), v)
	case unstable.ArrayTable:
		d.skipUntilTable = false
		d.strict.EnterArrayTable(expr)
		d.clearArrayTable = first
		x, err = d.handleArrayTable(expr.Key(), v)
	default:
		panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind))
	}

	if d.skipUntilTable {
		if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable {
			d.strict.MissingTable(expr)
		}
	} else if err == nil && x.IsValid() {
		// A valid x is the replacement value produced by the handler.
		v.Set(x)
	}

	return err
}

// handleArrayTable walks the remaining key parts of an [[array table]]
// expression; once the key is exhausted it decodes the table's key-values.
func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	if key.Next() {
		return d.handleArrayTablePart(key, v)
	}
	return d.handleKeyValues(v)
}

// handleArrayTableCollectionLast handles the last part of an array table key:
// it materializes the collection in v (creating []interface{} behind
// interfaces, allocating through pointers) and appends/targets a new element.
func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	switch v.Kind() {
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			// Nothing stored yet: start a fresh []interface{}.
			elem = reflect.New(sliceInterfaceType).Elem()
			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
		} else if elem.Kind() == reflect.Slice {
			if elem.Type() != sliceInterfaceType {
				// Replace a slice of the wrong type with []interface{}.
				elem = reflect.New(sliceInterfaceType).Elem()
				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
			} else if !elem.CanSet() {
				// Values read out of an interface are not settable: copy into
				// an addressable slice first.
				nelem := reflect.New(sliceInterfaceType).Elem()
				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
				reflect.Copy(nelem, elem)
				elem = nelem
			}
			// First encounter of this array table in the document: drop any
			// pre-existing elements.
			if d.clearArrayTable && elem.Len() > 0 {
				elem.SetLen(0)
				d.clearArrayTable = false
			}
		}
		return d.handleArrayTableCollectionLast(key, elem)
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			elem = ptr.Elem()
		}

		elem, err := d.handleArrayTableCollectionLast(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		v.Elem().Set(elem)

		return v, nil
	case reflect.Slice:
		// First encounter of this array table: reset the slice.
		if d.clearArrayTable && v.Len() > 0 {
			v.SetLen(0)
			d.clearArrayTable = false
		}
		elemType := v.Type().Elem()
		var elem reflect.Value
		if elemType.Kind() == reflect.Interface {
			elem = makeMapStringInterface()
		} else {
			elem = reflect.New(elemType).Elem()
		}
		elem2, err := d.handleArrayTable(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem2.IsValid() {
			elem = elem2
		}
		return reflect.Append(v, elem), nil
	case reflect.Array:
		// Fixed-size Go array: advance the tracked index and fail when full.
		idx := d.arrayIndex(true, v)
		if idx >= v.Len() {
			return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
		}
		elem := v.Index(idx)
		_, err := d.handleArrayTable(key, elem)
		return v, err
	default:
		return reflect.Value{}, d.typeMismatchError("array table", v.Type())
	}
}

// When parsing an array table expression, each part of the key needs to be
// evaluated like a normal key, but if it returns a collection, it also needs to
// point to the last element of the collection. Unless it is the last part of
// the key, then it needs to create a new element at the end.
func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	// The last key part gets the append/create behavior.
	if key.IsLast() {
		return d.handleArrayTableCollectionLast(key, v)
	}

	switch v.Kind() {
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			elem = ptr.Elem()
		}

		elem, err := d.handleArrayTableCollection(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem.IsValid() {
			v.Elem().Set(elem)
		}

		return v, nil
	case reflect.Slice:
		// Non-last key part over a slice: descend into the last element.
		elem := v.Index(v.Len() - 1)
		x, err := d.handleArrayTable(key, elem)
		if err != nil || d.skipUntilTable {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			elem.Set(x)
		}

		return v, err
	case reflect.Array:
		// Non-last key part over an array: descend into the current element
		// without advancing the tracked index.
		idx := d.arrayIndex(false, v)
		if idx >= v.Len() {
			return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx)
		}
		elem := v.Index(idx)
		_, err := d.handleArrayTable(key, elem)
		return v, err
	}

	// Not a collection: treat like a regular table key part.
	return d.handleArrayTable(key, v)
}

// handleKeyPart resolves one part of a dotted key against v (pointer, map,
// struct, or interface), then delegates the rest of the key to nextFn. makeFn
// decides what container to create when an interface element is empty.
// Returns a replacement for v when one had to be created (e.g. a new map).
func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) {
	var rv reflect.Value

	// First, dispatch over v to make sure it is a valid object.
	// There is no guarantee over what it could be.
	switch v.Kind() {
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		elem = v.Elem()
		return d.handleKeyPart(key, elem, nextFn, makeFn)
	case reflect.Map:
		vt := v.Type()

		// Create the key for the map element. Convert to key type.
		mk, err := d.keyFromData(vt.Key(), key.Node().Data)
		if err != nil {
			return reflect.Value{}, err
		}

		// If the map does not exist, create it.
		if v.IsNil() {
			vt := v.Type()
			v = reflect.MakeMap(vt)
			rv = v
		}

		mv := v.MapIndex(mk)
		set := false
		if !mv.IsValid() {
			// If there is no value in the map, create a new one according to
			// the map type. If the element type is interface, create either a
			// map[string]interface{} or a []interface{} depending on whether
			// this is the last part of the array table key.
			t := vt.Elem()
			if t.Kind() == reflect.Interface {
				mv = makeFn()
			} else {
				mv = reflect.New(t).Elem()
			}
			set = true
		} else if mv.Kind() == reflect.Interface {
			mv = mv.Elem()
			if !mv.IsValid() {
				mv = makeFn()
			}
			set = true
		} else if !mv.CanAddr() {
			// Map elements are not addressable: work on a settable copy and
			// write it back below.
			vt := v.Type()
			t := vt.Elem()
			oldmv := mv
			mv = reflect.New(t).Elem()
			mv.Set(oldmv)
			set = true
		}

		x, err := nextFn(key, mv)
		if err != nil {
			return reflect.Value{}, err
		}

		if x.IsValid() {
			mv = x
			set = true
		}

		if set {
			v.SetMapIndex(mk, mv)
		}
	case reflect.Struct:
		path, found := structFieldPath(v, string(key.Node().Data))
		if !found {
			// Unknown field: skip expressions until the next table, strict
			// mode will report it at the call site.
			d.skipUntilTable = true
			return reflect.Value{}, nil
		}

		if d.errorContext == nil {
			d.errorContext = new(errorContext)
		}
		t := v.Type()
		d.errorContext.Struct = t
		d.errorContext.Field = path

		f := fieldByIndex(v, path)
		x, err := nextFn(key, f)
		if err != nil || d.skipUntilTable {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			f.Set(x)
		}
		d.errorContext.Field = nil
		d.errorContext.Struct = nil
	case reflect.Interface:
		if v.Elem().IsValid() {
			v = v.Elem()
		} else {
			v = makeMapStringInterface()
		}

		x, err := d.handleKeyPart(key, v, nextFn, makeFn)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			v = x
		}
		rv = v
	default:
		panic(fmt.Errorf("unhandled part: %s", v.Kind()))
	}

	return rv, nil
}

// HandleArrayTablePart navigates the Go structure v using the key v. It is
// only used for the prefix (non-last) parts of an array-table. When
// encountering a collection, it should go to the last element.
func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	// The last key part must create a slice when it lands in an interface;
	// intermediate parts create maps.
	var makeFn valueMakerFn
	if key.IsLast() {
		makeFn = makeSliceInterface
	} else {
		makeFn = makeMapStringInterface
	}
	return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn)
}

// HandleTable returns a reference when it has checked the next expression but
// cannot handle it.
func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	if v.Kind() == reflect.Slice {
		if v.Len() == 0 {
			return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice")
		}
		// Tables inside a slice always target the last element.
		elem := v.Index(v.Len() - 1)
		x, err := d.handleTable(key, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			elem.Set(x)
		}
		return reflect.Value{}, nil
	}
	if key.Next() {
		// Still scoping the key
		return d.handleTablePart(key, v)
	}
	// Done scoping the key.
	// Now handle all the key-value expressions in this table.
	return d.handleKeyValues(v)
}

// Handle root expressions until the end of the document or the next
// non-key-value.
func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) {
	var rv reflect.Value
	for d.nextExpr() {
		expr := d.expr()
		if expr.Kind != unstable.KeyValue {
			// Stash the expression so that fromParser can just loop and use
			// the right handler.
			// We could just recurse ourselves here, but at least this gives a
			// chance to pop the stack a bit.
			d.stashExpr()
			break
		}

		_, err := d.seen.CheckExpression(expr)
		if err != nil {
			return reflect.Value{}, err
		}

		x, err := d.handleKeyValue(expr, v)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			v = x
			rv = x
		}
	}
	return rv, nil
}

type (
	// handlerFn consumes the rest of a key iterator against a value.
	handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error)
	// valueMakerFn creates a default container for empty interface slots.
	valueMakerFn func() reflect.Value
)

// makeMapStringInterface returns a fresh map[string]interface{} value.
func makeMapStringInterface() reflect.Value {
	return reflect.MakeMap(mapStringInterfaceType)
}

// makeSliceInterface returns a fresh empty []interface{} value.
func makeSliceInterface() reflect.Value {
	return reflect.MakeSlice(sliceInterfaceType, 0, 16)
}

// handleTablePart resolves one part of a table key via handleKeyPart.
func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) {
	return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface)
}

// tryTextUnmarshaler decodes node into v through encoding.TextUnmarshaler
// when v's pointer type implements it. Returns true when it handled the node.
func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) {
	// Special case for time, because we allow to unmarshal to it from
	// different kind of AST nodes.
	if v.Type() == timeType {
		return false, nil
	}

	if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) {
		err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data)
		if err != nil {
			return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err)
		}
		return true, nil
	}

	return false, nil
}

// handleValue decodes a single (non-table) TOML value node into v, after
// dereferencing pointers and trying the Unmarshaler/TextUnmarshaler hooks.
func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error {
	for v.Kind() == reflect.Ptr {
		v = initAndDereferencePointer(v)
	}

	// Opt-in unstable.Unmarshaler hook takes precedence over everything.
	if d.unmarshalerInterface {
		if v.CanAddr() && v.Addr().CanInterface() {
			if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok {
				return outi.UnmarshalTOML(value)
			}
		}
	}

	ok, err := d.tryTextUnmarshaler(value, v)
	if ok || err != nil {
		return err
	}

	switch value.Kind {
	case unstable.String:
		return d.unmarshalString(value, v)
	case unstable.Integer:
		return d.unmarshalInteger(value, v)
	case unstable.Float:
		return d.unmarshalFloat(value, v)
	case unstable.Bool:
		return d.unmarshalBool(value, v)
	case unstable.DateTime:
		return d.unmarshalDateTime(value, v)
	case unstable.LocalDate:
		return d.unmarshalLocalDate(value, v)
	case unstable.LocalTime:
		return d.unmarshalLocalTime(value, v)
	case unstable.LocalDateTime:
		return d.unmarshalLocalDateTime(value, v)
	case unstable.InlineTable:
		return d.unmarshalInlineTable(value, v)
	case unstable.Array:
		return d.unmarshalArray(value, v)
	default:
		panic(fmt.Errorf("handleValue not implemented for %s", value.Kind))
	}
}

// unmarshalArray decodes a TOML array node into a Go slice, array, or
// interface target.
func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error {
	switch v.Kind() {
	case reflect.Slice:
		if v.IsNil() {
			v.Set(reflect.MakeSlice(v.Type(), 0, 16))
		} else {
			// Reuse the backing storage of an existing slice.
			v.SetLen(0)
		}
	case reflect.Array:
		// arrays are always initialized
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			elem = reflect.New(sliceInterfaceType).Elem()
			elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
		} else if elem.Kind() == reflect.Slice {
			if elem.Type() != sliceInterfaceType {
				elem = reflect.New(sliceInterfaceType).Elem()
				elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16))
			} else if !elem.CanSet() {
				// Copy into an addressable slice; interface contents are
				// read-only.
				nelem := reflect.New(sliceInterfaceType).Elem()
				nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap()))
				reflect.Copy(nelem, elem)
				elem = nelem
			}
		}
		err := d.unmarshalArray(array, elem)
		if err != nil {
			return err
		}
		v.Set(elem)
		return nil
	default:
		// TODO: use newDecodeError, but first the parser needs to fill
		// array.Data.
		return d.typeMismatchError("array", v.Type())
	}

	elemType := v.Type().Elem()

	it := array.Children()
	idx := 0
	for it.Next() {
		n := it.Node()

		// TODO: optimize
		if v.Kind() == reflect.Slice {
			elem := reflect.New(elemType).Elem()

			err := d.handleValue(n, elem)
			if err != nil {
				return err
			}

			v.Set(reflect.Append(v, elem))
		} else { // array
			// Extra TOML elements beyond the Go array length are silently
			// dropped.
			if idx >= v.Len() {
				return nil
			}
			elem := v.Index(idx)
			err := d.handleValue(n, elem)
			if err != nil {
				return err
			}
			idx++
		}
	}

	return nil
}

// unmarshalInlineTable decodes an inline table node into a map, struct, or
// interface target.
func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error {
	// Make sure v is an initialized object.
	switch v.Kind() {
	case reflect.Map:
		if v.IsNil() {
			v.Set(reflect.MakeMap(v.Type()))
		}
	case reflect.Struct:
		// structs are always initialized.
	case reflect.Interface:
		elem := v.Elem()
		if !elem.IsValid() {
			elem = makeMapStringInterface()
			v.Set(elem)
		}
		return d.unmarshalInlineTable(itable, elem)
	default:
		return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind())
	}

	it := itable.Children()
	for it.Next() {
		n := it.Node()

		x, err := d.handleKeyValue(n, v)
		if err != nil {
			return err
		}
		if x.IsValid() {
			v = x
		}
	}

	return nil
}

// unmarshalDateTime decodes an offset date-time into v (a time.Time target).
func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error {
	dt, err := parseDateTime(value.Data)
	if err != nil {
		return err
	}

	v.Set(reflect.ValueOf(dt))
	return nil
}

// unmarshalLocalDate decodes a local date into a LocalDate, or into a
// time.Time in the time.Local zone.
func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error {
	ld, err := parseLocalDate(value.Data)
	if err != nil {
		return err
	}

	if v.Type() == timeType {
		cast := ld.AsTime(time.Local)
		v.Set(reflect.ValueOf(cast))
		return nil
	}

	v.Set(reflect.ValueOf(ld))

	return nil
}

// unmarshalLocalTime decodes a local time into a LocalTime value.
func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error {
	lt, rest, err := parseLocalTime(value.Data)
	if err != nil {
		return err
	}

	if len(rest) > 0 {
		return unstable.NewParserError(rest, "extra characters at the end of a local time")
	}

	v.Set(reflect.ValueOf(lt))
	return nil
}

// unmarshalLocalDateTime decodes a local date-time into a LocalDateTime, or
// into a time.Time in the time.Local zone.
func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error {
	ldt, rest, err := parseLocalDateTime(value.Data)
	if err != nil {
		return err
	}

	if len(rest) > 0 {
		return unstable.NewParserError(rest, "extra characters at the end of a local date time")
	}

	if v.Type() == timeType {
		cast := ldt.AsTime(time.Local)

		v.Set(reflect.ValueOf(cast))
		return nil
	}

	v.Set(reflect.ValueOf(ldt))

	return nil
}

// unmarshalBool decodes a TOML boolean into a bool or interface target.
func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error {
	// The parser guarantees Data is either "true" or "false"; the first byte
	// is enough to distinguish them.
	b := value.Data[0] == 't'

	switch v.Kind() {
	case reflect.Bool:
		v.SetBool(b)
	case reflect.Interface:
		v.Set(reflect.ValueOf(b))
	default:
		// NOTE(review): %t formats the boolean value, not the target type, so
		// this message reads "cannot assign boolean to a true" — matches
		// upstream wording; do not change in vendored code.
		return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b)
	}

	return nil
}

// unmarshalFloat decodes a TOML float into float64, float32 (with range
// check), or an interface target.
func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error {
	f, err := parseFloat(value.Data)
	if err != nil {
		return err
	}

	switch v.Kind() {
	case reflect.Float64:
		v.SetFloat(f)
	case reflect.Float32:
		if f > math.MaxFloat32 {
			return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f)
		}
		v.SetFloat(f)
	case reflect.Interface:
		v.Set(reflect.ValueOf(f))
	default:
		return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind())
	}

	return nil
}

const (
	maxInt = int64(^uint(0) >> 1)
	minInt = -maxInt - 1
)

// Maximum value of uint for decoding. Currently the decoder parses the integer
// into an int64. As a result, on architectures where uint is 64 bits, the
// effective maximum uint we can decode is the maximum of int64. On
// architectures where uint is 32 bits, the maximum value we can decode is
// lower: the maximum of uint32. I didn't find a way to figure out this value at
// compile time, so it is computed during initialization.
var maxUint int64 = math.MaxInt64

func init() {
	m := uint64(^uint(0))
	if m < uint64(maxUint) {
		maxUint = int64(m)
	}
}

// unmarshalInteger decodes a TOML integer into any Go integer kind with range
// checks, into a float target, or into an interface (as int64).
func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error {
	kind := v.Kind()
	if kind == reflect.Float32 || kind == reflect.Float64 {
		return d.unmarshalFloat(value, v)
	}

	i, err := parseInteger(value.Data)
	if err != nil {
		return err
	}

	var r reflect.Value

	switch kind {
	case reflect.Int64:
		v.SetInt(i)
		return nil
	case reflect.Int32:
		if i < math.MinInt32 || i > math.MaxInt32 {
			return fmt.Errorf("toml: number %d does not fit in an int32", i)
		}
		r = reflect.ValueOf(int32(i))
	case reflect.Int16:
		if i < math.MinInt16 || i > math.MaxInt16 {
			return fmt.Errorf("toml: number %d does not fit in an int16", i)
		}
		r = reflect.ValueOf(int16(i))
	case reflect.Int8:
		if i < math.MinInt8 || i > math.MaxInt8 {
			return fmt.Errorf("toml: number %d does not fit in an int8", i)
		}
		r = reflect.ValueOf(int8(i))
	case reflect.Int:
		if i < minInt || i > maxInt {
			return fmt.Errorf("toml: number %d does not fit in an int", i)
		}
		r = reflect.ValueOf(int(i))
	case reflect.Uint64:
		if i < 0 {
			return fmt.Errorf("toml: negative number %d does not fit in an uint64", i)
		}
		r = reflect.ValueOf(uint64(i))
	case reflect.Uint32:
		// NOTE(review): the message says "negative number" even for overflow
		// (i > MaxUint32); same for the smaller uint kinds below. Matches
		// upstream wording; kept as-is in vendored code.
		if i < 0 || i > math.MaxUint32 {
			return fmt.Errorf("toml: negative number %d does not fit in an uint32", i)
		}
		r = reflect.ValueOf(uint32(i))
	case reflect.Uint16:
		if i < 0 || i > math.MaxUint16 {
			return fmt.Errorf("toml: negative number %d does not fit in an uint16", i)
		}
		r = reflect.ValueOf(uint16(i))
	case reflect.Uint8:
		if i < 0 || i > math.MaxUint8 {
			return fmt.Errorf("toml: negative number %d does not fit in an uint8", i)
		}
		r = reflect.ValueOf(uint8(i))
	case reflect.Uint:
		if i < 0 || i > maxUint {
			return fmt.Errorf("toml: negative number %d does not fit in an uint", i)
		}
		r = reflect.ValueOf(uint(i))
	case reflect.Interface:
		r = reflect.ValueOf(i)
	default:
		return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type()))
	}

	// Support named integer types (e.g. type MyInt int32).
	if !r.Type().AssignableTo(v.Type()) {
		r = r.Convert(v.Type())
	}

	v.Set(r)

	return nil
}

// unmarshalString decodes a TOML string into a string or interface target.
func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error {
	switch v.Kind() {
	case reflect.String:
		v.SetString(string(value.Data))
	case reflect.Interface:
		v.Set(reflect.ValueOf(string(value.Data)))
	default:
		return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type()))
	}

	return nil
}

// handleKeyValue decodes one key-value expression into v, with strict-mode
// bookkeeping around it.
func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) {
	d.strict.EnterKeyValue(expr)

	v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v)
	if d.skipUntilTable {
		d.strict.MissingField(expr)
		d.skipUntilTable = false
	}

	d.strict.ExitKeyValue(expr)

	return v, err
}

// handleKeyValueInner walks the remaining key parts, then decodes the value
// once the key is fully scoped.
func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
	if key.Next() {
		// Still scoping the key
		return d.handleKeyValuePart(key, value, v)
	}
	// Done scoping the key.
	// v is whatever Go value we need to fill.
	return reflect.Value{}, d.handleValue(value, v)
}

// keyFromData converts a raw TOML key into a value of the map's key type:
// string-likes directly, TextUnmarshaler implementations via UnmarshalText,
// and numeric kinds by parsing the key text.
func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) {
	// NOTE(review): the error messages below interpolate stringType rather
	// than keyType — matches upstream text; kept verbatim in vendored code.
	switch {
	case stringType.AssignableTo(keyType):
		return reflect.ValueOf(string(data)), nil

	case stringType.ConvertibleTo(keyType):
		return reflect.ValueOf(string(data)).Convert(keyType), nil

	case keyType.Implements(textUnmarshalerType):
		mk := reflect.New(keyType.Elem())
		if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err)
		}
		return mk, nil

	case reflect.PointerTo(keyType).Implements(textUnmarshalerType):
		mk := reflect.New(keyType)
		if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err)
		}
		return mk.Elem(), nil

	case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64:
		key, err := strconv.ParseInt(string(data), 10, 64)
		if err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err)
		}
		return reflect.ValueOf(key).Convert(keyType), nil

	case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64:
		key, err := strconv.ParseUint(string(data), 10, 64)
		if err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err)
		}
		return reflect.ValueOf(key).Convert(keyType), nil

	case keyType.Kind() == reflect.Float32:
		key, err := strconv.ParseFloat(string(data), 32)
		if err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err)
		}
		return reflect.ValueOf(float32(key)), nil

	case keyType.Kind() == reflect.Float64:
		key, err := strconv.ParseFloat(string(data), 64)
		if err != nil {
			return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err)
		}
		return reflect.ValueOf(float64(key)), nil
	}
	return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType)
}

// handleKeyValuePart resolves one part of a key-value's dotted key against v
// (map, struct, interface, or pointer) and recurses through
// handleKeyValueInner. Returns a replacement for v when one had to be created.
func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) {
	// contains the replacement for v
	var rv reflect.Value

	// First, dispatch over v to make sure it is a valid object.
	// There is no guarantee over what it could be.
	switch v.Kind() {
	case reflect.Map:
		vt := v.Type()

		mk, err := d.keyFromData(vt.Key(), key.Node().Data)
		if err != nil {
			return reflect.Value{}, err
		}

		// If the map does not exist, create it.
		if v.IsNil() {
			v = reflect.MakeMap(vt)
			rv = v
		}

		mv := v.MapIndex(mk)
		set := false
		if !mv.IsValid() || key.IsLast() {
			set = true
			mv = reflect.New(v.Type().Elem()).Elem()
		}

		nv, err := d.handleKeyValueInner(key, value, mv)
		if err != nil {
			return reflect.Value{}, err
		}
		if nv.IsValid() {
			mv = nv
			set = true
		}

		if set {
			v.SetMapIndex(mk, mv)
		}
	case reflect.Struct:
		path, found := structFieldPath(v, string(key.Node().Data))
		if !found {
			d.skipUntilTable = true
			break
		}

		if d.errorContext == nil {
			d.errorContext = new(errorContext)
		}
		t := v.Type()
		d.errorContext.Struct = t
		d.errorContext.Field = path

		f := fieldByIndex(v, path)

		if !f.CanAddr() {
			// If the field is not addressable, need to take a slower path and
			// make a copy of the struct itself to a new location.
			nvp := reflect.New(v.Type())
			nvp.Elem().Set(v)
			v = nvp.Elem()
			_, err := d.handleKeyValuePart(key, value, v)
			if err != nil {
				return reflect.Value{}, err
			}
			return nvp.Elem(), nil
		}

		x, err := d.handleKeyValueInner(key, value, f)
		if err != nil {
			return reflect.Value{}, err
		}

		if x.IsValid() {
			f.Set(x)
		}
		d.errorContext.Struct = nil
		d.errorContext.Field = nil
	case reflect.Interface:
		v = v.Elem()

		// Following encoding/json: decoding an object into an
		// interface{}, it needs to always hold a
		// map[string]interface{}. This is for the types to be
		// consistent whether a previous value was set or not.
		if !v.IsValid() || v.Type() != mapStringInterfaceType {
			v = makeMapStringInterface()
		}

		x, err := d.handleKeyValuePart(key, value, v)
		if err != nil {
			return reflect.Value{}, err
		}
		if x.IsValid() {
			v = x
		}
		rv = v
	case reflect.Ptr:
		elem := v.Elem()
		if !elem.IsValid() {
			ptr := reflect.New(v.Type().Elem())
			v.Set(ptr)
			rv = v
			elem = ptr.Elem()
		}

		elem2, err := d.handleKeyValuePart(key, value, elem)
		if err != nil {
			return reflect.Value{}, err
		}
		if elem2.IsValid() {
			elem = elem2
		}
		v.Elem().Set(elem)
	default:
		return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind())
	}

	return rv, nil
}

// initAndDereferencePointer allocates v's pointee when v is nil and returns
// the dereferenced element.
func initAndDereferencePointer(v reflect.Value) reflect.Value {
	var elem reflect.Value
	if v.IsNil() {
		ptr := reflect.New(v.Type().Elem())
		v.Set(ptr)
	}
	elem = v.Elem()
	return elem
}

// Same as reflect.Value.FieldByIndex, but creates pointers if needed.
func fieldByIndex(v reflect.Value, path []int) reflect.Value {
	for _, x := range path {
		v = v.Field(x)

		if v.Kind() == reflect.Ptr {
			if v.IsNil() {
				v.Set(reflect.New(v.Type().Elem()))
			}
			v = v.Elem()
		}
	}
	return v
}

// fieldPathsMap maps a TOML key name to the index path of the matching struct
// field.
type fieldPathsMap = map[string][]int

var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap

func structFieldPath(v reflect.Value, name string) ([]int, bool) {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/decode.go
vendor/github.com/pelletier/go-toml/v2/decode.go
package toml

import (
	"fmt"
	"math"
	"strconv"
	"time"

	"github.com/pelletier/go-toml/v2/unstable"
)

// parseInteger dispatches on the TOML integer prefix (0x/0b/0o) and parses
// the number in the corresponding base; plain decimal otherwise.
func parseInteger(b []byte) (int64, error) {
	if len(b) > 2 && b[0] == '0' {
		switch b[1] {
		case 'x':
			return parseIntHex(b)
		case 'b':
			return parseIntBin(b)
		case 'o':
			return parseIntOct(b)
		default:
			panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1]))
		}
	}
	return parseIntDec(b)
}

// parseLocalDate parses a TOML full-date (YYYY-MM-DD) into a LocalDate and
// validates that the calendar date exists.
func parseLocalDate(b []byte) (LocalDate, error) {
	// full-date      = date-fullyear "-" date-month "-" date-mday
	// date-fullyear  = 4DIGIT
	// date-month     = 2DIGIT  ; 01-12
	// date-mday      = 2DIGIT  ; 01-28, 01-29, 01-30, 01-31 based on month/year
	var date LocalDate

	if len(b) != 10 || b[4] != '-' || b[7] != '-' {
		return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD")
	}

	var err error

	date.Year, err = parseDecimalDigits(b[0:4])
	if err != nil {
		return LocalDate{}, err
	}

	date.Month, err = parseDecimalDigits(b[5:7])
	if err != nil {
		return LocalDate{}, err
	}

	date.Day, err = parseDecimalDigits(b[8:10])
	if err != nil {
		return LocalDate{}, err
	}

	if !isValidDate(date.Year, date.Month, date.Day) {
		return LocalDate{}, unstable.NewParserError(b, "impossible date")
	}

	return date, nil
}

// parseDecimalDigits parses b as an unsigned base-10 integer; every byte must
// be an ASCII digit.
func parseDecimalDigits(b []byte) (int, error) {
	v := 0

	for i, c := range b {
		if c < '0' || c > '9' {
			return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)")
		}
		v *= 10
		v += int(c - '0')
	}

	return v, nil
}

// parseDateTime parses a TOML offset date-time (local date-time followed by
// "Z" or a +/-HH:MM offset) into a time.Time in the matching fixed zone.
func parseDateTime(b []byte) (time.Time, error) {
	// offset-date-time = full-date time-delim full-time
	// full-time      = partial-time time-offset
	// time-offset    = "Z" / time-numoffset
	// time-numoffset = ( "+" / "-" ) time-hour ":" time-minute

	dt, b, err := parseLocalDateTime(b)
	if err != nil {
		return time.Time{}, err
	}

	var zone *time.Location

	if len(b) == 0 {
		// parser should have checked that when assigning the date time node
		panic("date time should have a timezone")
	}

	if b[0] == 'Z' || b[0] == 'z' {
		b = b[1:]
		zone = time.UTC
	} else {
		const dateTimeByteLen = 6
		if len(b) != dateTimeByteLen {
			return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone")
		}
		var direction int
		switch b[0] {
		case '-':
			direction = -1
		case '+':
			direction = +1
		default:
			return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character")
		}

		if b[3] != ':' {
			return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator")
		}

		hours, err := parseDecimalDigits(b[1:3])
		if err != nil {
			return time.Time{}, err
		}
		if hours > 23 {
			return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours")
		}

		minutes, err := parseDecimalDigits(b[4:6])
		if err != nil {
			return time.Time{}, err
		}
		if minutes > 59 {
			return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes")
		}

		seconds := direction * (hours*3600 + minutes*60)
		if seconds == 0 {
			// -00:00 / +00:00 both normalize to UTC.
			zone = time.UTC
		} else {
			zone = time.FixedZone("", seconds)
		}
		b = b[dateTimeByteLen:]
	}

	if len(b) > 0 {
		return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone")
	}

	t := time.Date(
		dt.Year,
		time.Month(dt.Month),
		dt.Day,
		dt.Hour,
		dt.Minute,
		dt.Second,
		dt.Nanosecond,
		zone)

	return t, nil
}

// parseLocalDateTime parses a TOML local date-time (date, "T"/"t"/space
// separator, then time) and returns the bytes left after the time component.
func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) {
	var dt LocalDateTime

	const localDateTimeByteMinLen = 11
	if len(b) < localDateTimeByteMinLen {
		return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]")
	}

	date, err := parseLocalDate(b[:10])
	if err != nil {
		return dt, nil, err
	}
	dt.LocalDate = date

	sep := b[10]
	if sep != 'T' && sep != ' ' && sep != 't' {
		return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space")
	}

	t, rest, err := parseLocalTime(b[11:])
	if err != nil {
		return dt, nil, err
	}
	dt.LocalTime = t

	return dt, rest, nil
}

// parseLocalTime is a bit different because it also returns the remaining
// []byte that is didn't need. This is to allow parseDateTime to parse those
// remaining bytes as a timezone.
func parseLocalTime(b []byte) (LocalTime, []byte, error) {
	var (
		// nspow[p] scales a fraction with p digits up to nanoseconds.
		nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0}

		t LocalTime
	)

	// check if b matches to have expected format HH:MM:SS[.NNNNNN]
	const localTimeByteLen = 8
	if len(b) < localTimeByteLen {
		return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]")
	}

	var err error

	t.Hour, err = parseDecimalDigits(b[0:2])
	if err != nil {
		return t, nil, err
	}

	if t.Hour > 23 {
		return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23")
	}
	if b[2] != ':' {
		return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes")
	}

	t.Minute, err = parseDecimalDigits(b[3:5])
	if err != nil {
		return t, nil, err
	}
	if t.Minute > 59 {
		return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59")
	}
	if b[5] != ':' {
		return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds")
	}

	t.Second, err = parseDecimalDigits(b[6:8])
	if err != nil {
		return t, nil, err
	}

	// 60 (not 59) is accepted here, presumably to tolerate leap
	// seconds — TODO confirm against the TOML spec.
	if t.Second > 60 {
		return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60")
	}

	b = b[8:]

	// Optional fractional seconds: '.' followed by at least one digit.
	if len(b) >= 1 && b[0] == '.' {
		frac := 0
		precision := 0
		digits := 0

		for i, c := range b[1:] {
			if !isDigit(c) {
				if i == 0 {
					return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point")
				}
				break
			}
			digits++

			const maxFracPrecision = 9
			if i >= maxFracPrecision {
				// go-toml allows decoding fractional seconds
				// beyond the supported precision of 9
				// digits. It truncates the fractional component
				// to the supported precision and ignores the
				// remaining digits.
				//
				// https://github.com/pelletier/go-toml/discussions/707
				continue
			}

			frac *= 10
			frac += int(c - '0')
			precision++
		}

		if precision == 0 {
			return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit")
		}

		t.Nanosecond = frac * nspow[precision]
		t.Precision = precision

		// Skip the dot plus every consumed digit, including any digits
		// past the 9-digit precision cap.
		return t, b[1+digits:], nil
	}
	return t, b, nil
}

// parseFloat parses a TOML float literal. Explicitly signed nan is mapped
// directly to math.NaN(); underscores are validated and stripped before
// delegating the numeric conversion to strconv.ParseFloat.
//
//nolint:cyclop
func parseFloat(b []byte) (float64, error) {
	if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' {
		return math.NaN(), nil
	}

	cleaned, err := checkAndRemoveUnderscoresFloats(b)
	if err != nil {
		return 0, err
	}

	if cleaned[0] == '.' {
		return 0, unstable.NewParserError(b, "float cannot start with a dot")
	}

	if cleaned[len(cleaned)-1] == '.' {
		return 0, unstable.NewParserError(b, "float cannot end with a dot")
	}

	dotAlreadySeen := false
	for i, c := range cleaned {
		if c == '.' {
			if dotAlreadySeen {
				return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point")
			}
			// Safe indexing: the first/last-character checks above
			// guarantee a dot is never at either end of cleaned.
			if !isDigit(cleaned[i-1]) {
				return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit")
			}
			if !isDigit(cleaned[i+1]) {
				return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit")
			}
			dotAlreadySeen = true
		}
	}

	start := 0
	if cleaned[0] == '+' || cleaned[0] == '-' {
		start = 1
	}
	// TOML forbids leading zeroes in the integer part (e.g. 03.14).
	if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) {
		return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes")
	}

	f, err := strconv.ParseFloat(string(cleaned), 64)
	if err != nil {
		return 0, unstable.NewParserError(b, "unable to parse float: %w", err)
	}

	return f, nil
}

// parseIntHex parses a 0x-prefixed integer; b includes the "0x" prefix.
func parseIntHex(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 16, 64)
	if err != nil {
		return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err)
	}

	return i, nil
}

// parseIntOct parses a 0o-prefixed integer; b includes the "0o" prefix.
func parseIntOct(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 8, 64)
	if err != nil {
		return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err)
	}

	return i, nil
}

// parseIntBin parses a 0b-prefixed integer; b includes the "0b" prefix.
func parseIntBin(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:])
	if err != nil {
		return 0, err
	}

	i, err := strconv.ParseInt(string(cleaned), 2, 64)
	if err != nil {
		return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err)
	}

	return i, nil
}

// isSign reports whether b is an explicit sign character.
func isSign(b byte) bool {
	return b == '+' || b == '-'
}

// parseIntDec parses a decimal integer, rejecting leading zeroes as
// required by TOML (e.g. "042" is invalid, "0" is fine).
func parseIntDec(b []byte) (int64, error) {
	cleaned, err := checkAndRemoveUnderscoresIntegers(b)
	if err != nil {
		return 0, err
	}

	startIdx := 0

	if isSign(cleaned[0]) {
		startIdx++
	}

	if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' {
		return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number")
	}

	i, err := strconv.ParseInt(string(cleaned), 10, 64)
	if err != nil {
		return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err)
	}

	return i, nil
}

// checkAndRemoveUnderscoresIntegers validates underscore placement in an
// integer literal (no leading/trailing underscore, at least one digit
// between underscores) and returns the bytes with underscores removed.
// When the input contains no underscore, the original slice is returned
// unchanged to avoid an allocation.
func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) {
	start := 0
	if b[start] == '+' || b[start] == '-' {
		start++
	}

	// Lone sign: nothing to validate; strconv reports the error later.
	if len(b) == start {
		return b, nil
	}

	if b[start] == '_' {
		return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore")
	}

	if b[len(b)-1] == '_' {
		return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
	}

	// fast path
	i := 0
	for ; i < len(b); i++ {
		if b[i] == '_' {
			break
		}
	}
	if i == len(b) {
		return b, nil
	}

	// before tracks whether the previous byte was a digit, so that
	// consecutive underscores are rejected.
	before := false
	cleaned := make([]byte, i, len(b))
	copy(cleaned, b)

	for i++; i < len(b); i++ {
		c := b[i]
		if c == '_' {
			if !before {
				return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
			}
			before = false
		} else {
			before = true
			cleaned = append(cleaned, c)
		}
	}

	return cleaned, nil
}

// checkAndRemoveUnderscoresFloats validates underscore placement in a
// float literal — including the extra constraints around 'e'/'E' and the
// decimal point — and returns the bytes with underscores removed. When the
// input contains no underscore, the original slice is returned unchanged.
func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) {
	if b[0] == '_' {
		return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore")
	}

	if b[len(b)-1] == '_' {
		return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore")
	}

	// fast path
	i := 0
	for ; i < len(b); i++ {
		if b[i] == '_' {
			break
		}
	}
	if i == len(b) {
		return b, nil
	}

	before := false
	cleaned := make([]byte, 0, len(b))

	for i := 0; i < len(b); i++ {
		c := b[i]

		switch c {
		case '_':
			if !before {
				return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores")
			}
			if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') {
				return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent")
			}
			before = false
		case '+', '-':
			// signed exponents
			cleaned = append(cleaned, c)
			before = false
		case 'e', 'E':
			if i < len(b)-1 && b[i+1] == '_' {
				return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent")
			}
			cleaned = append(cleaned, c)
		case '.':
			if i < len(b)-1 && b[i+1] == '_' {
				return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point")
			}
			if i > 0 && b[i-1] == '_' {
				return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point")
			}
			cleaned = append(cleaned, c)
		default:
			before = true
			cleaned = append(cleaned, c)
		}
	}

	return cleaned, nil
}

// isValidDate checks if a provided date is a date that exists.
func isValidDate(year int, month int, day int) bool {
	return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year)
}

// daysBefore[m] counts the number of days in a non-leap year
// before month m begins. There is an entry for m=12, counting
// the number of days before January of next year (365).
var daysBefore = [...]int32{
	0,
	31,
	31 + 28,
	31 + 28 + 31,
	31 + 28 + 31 + 30,
	31 + 28 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30,
	31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}

// daysIn returns the number of days in month m (1-12) of the given year,
// accounting for February in leap years.
func daysIn(m int, year int) int {
	if m == 2 && isLeap(year) {
		return 29
	}
	return int(daysBefore[m] - daysBefore[m-1])
}

// isLeap implements the Gregorian leap-year rule.
func isLeap(year int) bool {
	return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}

// isDigit reports whether r is an ASCII digit.
func isDigit(r byte) bool {
	return r >= '0' && r <= '9'
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/strict.go
vendor/github.com/pelletier/go-toml/v2/strict.go
package toml import ( "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" "github.com/pelletier/go-toml/v2/unstable" ) type strict struct { Enabled bool // Tracks the current key being processed. key tracker.KeyTracker missing []unstable.ParserError } func (s *strict) EnterTable(node *unstable.Node) { if !s.Enabled { return } s.key.UpdateTable(node) } func (s *strict) EnterArrayTable(node *unstable.Node) { if !s.Enabled { return } s.key.UpdateArrayTable(node) } func (s *strict) EnterKeyValue(node *unstable.Node) { if !s.Enabled { return } s.key.Push(node) } func (s *strict) ExitKeyValue(node *unstable.Node) { if !s.Enabled { return } s.key.Pop(node) } func (s *strict) MissingTable(node *unstable.Node) { if !s.Enabled { return } s.missing = append(s.missing, unstable.ParserError{ Highlight: keyLocation(node), Message: "missing table", Key: s.key.Key(), }) } func (s *strict) MissingField(node *unstable.Node) { if !s.Enabled { return } s.missing = append(s.missing, unstable.ParserError{ Highlight: keyLocation(node), Message: "missing field", Key: s.key.Key(), }) } func (s *strict) Error(doc []byte) error { if !s.Enabled || len(s.missing) == 0 { return nil } err := &StrictMissingError{ Errors: make([]DecodeError, 0, len(s.missing)), } for _, derr := range s.missing { derr := derr err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) } return err } func keyLocation(node *unstable.Node) []byte { k := node.Key() hasOne := k.Next() if !hasOne { panic("should not be called with empty key") } start := k.Node().Data end := k.Node().Data for k.Next() { end = k.Node().Data } return danger.BytesRange(start, end) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
vendor/github.com/pelletier/go-toml/v2/unstable/parser.go
package unstable

import (
	"bytes"
	"fmt"
	"unicode"

	"github.com/pelletier/go-toml/v2/internal/characters"
	"github.com/pelletier/go-toml/v2/internal/danger"
)

// ParserError describes an error relative to the content of the document.
//
// It cannot outlive the instance of Parser it refers to, and may cause panics
// if the parser is reset.
type ParserError struct {
	Highlight []byte
	Message   string
	Key       []string // optional
}

// Error is the implementation of the error interface.
func (e *ParserError) Error() string {
	return e.Message
}

// NewParserError is a convenience function to create a ParserError.
//
// Warning: Highlight needs to be a subslice of Parser.data, so only slices
// returned by Parser.Raw are valid candidates.
func NewParserError(highlight []byte, format string, args ...interface{}) error {
	return &ParserError{
		Highlight: highlight,
		Message:   fmt.Errorf(format, args...).Error(),
	}
}

// Parser scans over a TOML-encoded document and generates an iterative AST.
//
// To prime the Parser, first reset it with the contents of a TOML document.
// Then, process all top-level expressions sequentially. See Example.
//
// Don't forget to check Error() after you're done parsing.
//
// Each top-level expression needs to be fully processed before calling
// NextExpression() again. Otherwise, calls to various Node methods may panic
// if the parser has moved on to the next expression.
//
// For performance reasons, go-toml doesn't make a copy of the input bytes to
// the parser. Make sure to copy all the bytes you need to outlive the slice
// given to the parser.
type Parser struct {
	data    []byte    // full input document
	builder builder   // storage for AST nodes, reset per expression
	ref     reference // root node of the last parsed expression
	left    []byte    // unparsed remainder of data
	err     error     // sticky: once set, NextExpression keeps returning false
	first   bool      // true until the first expression has been parsed

	KeepComments bool
}

// Data returns the slice provided to the last call to Reset.
func (p *Parser) Data() []byte {
	return p.data
}

// Range returns a range description that corresponds to a given slice of the
// input. If the argument is not a subslice of the parser input, this function
// panics.
func (p *Parser) Range(b []byte) Range {
	return Range{
		Offset: uint32(danger.SubsliceOffset(p.data, b)),
		Length: uint32(len(b)),
	}
}

// Raw returns the slice corresponding to the bytes in the given range.
func (p *Parser) Raw(raw Range) []byte {
	return p.data[raw.Offset : raw.Offset+raw.Length]
}

// Reset brings the parser to its initial state for a given input. It wipes
// and reuses internal storage to reduce allocation.
func (p *Parser) Reset(b []byte) {
	p.builder.Reset()
	p.ref = invalidReference
	p.data = b
	p.left = b
	p.err = nil
	p.first = true
}

// NextExpression parses the next top-level expression. If an expression was
// successfully parsed, it returns true. If the parser is at the end of the
// document or an error occurred, it returns false.
//
// Retrieve the parsed expression with Expression().
func (p *Parser) NextExpression() bool {
	if len(p.left) == 0 || p.err != nil {
		return false
	}

	p.builder.Reset()
	p.ref = invalidReference

	for {
		if len(p.left) == 0 || p.err != nil {
			return false
		}

		// Every expression after the first must be preceded by a newline.
		if !p.first {
			p.left, p.err = p.parseNewline(p.left)
		}

		if len(p.left) == 0 || p.err != nil {
			return false
		}

		p.ref, p.left, p.err = p.parseExpression(p.left)
		if p.err != nil {
			return false
		}
		p.first = false

		// An invalid reference means the line held nothing reportable
		// (e.g. only whitespace); keep scanning for a real expression.
		if p.ref.Valid() {
			return true
		}
	}
}

// Expression returns a pointer to the node representing the last successfully
// parsed expression.
func (p *Parser) Expression() *Node {
	return p.builder.NodeAt(p.ref)
}

// Error returns any error that has occurred during parsing.
func (p *Parser) Error() error {
	return p.err
}

// Position describes a position in the input.
type Position struct {
	// Number of bytes from the beginning of the input.
	Offset int
	// Line number, starting at 1.
	Line int
	// Column number, starting at 1.
	Column int
}

// Shape describes the position of a range in the input.
type Shape struct {
	Start Position
	End   Position
}

// position computes the line/column of b within p.data by counting the
// newlines that precede it.
func (p *Parser) position(b []byte) Position {
	offset := danger.SubsliceOffset(p.data, b)

	lead := p.data[:offset]

	return Position{
		Offset: offset,
		Line:   bytes.Count(lead, []byte{'\n'}) + 1,
		// LastIndex returns -1 when lead has no newline, which makes
		// the first line's columns start at 1 as intended.
		Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}),
	}
}

// Shape returns the shape of the given range in the input. Will
// panic if the range is not a subslice of the input.
func (p *Parser) Shape(r Range) Shape {
	raw := p.Raw(r)
	return Shape{
		Start: p.position(raw),
		End:   p.position(raw[r.Length:]),
	}
}

// parseNewline consumes a single LF or CRLF and returns the remainder.
func (p *Parser) parseNewline(b []byte) ([]byte, error) {
	if b[0] == '\n' {
		return b[1:], nil
	}

	if b[0] == '\r' {
		_, rest, err := scanWindowsNewline(b)
		return rest, err
	}

	return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0])
}

// parseComment scans a '#' comment. A Comment node is only built when
// KeepComments is set; otherwise the comment is consumed and discarded.
func (p *Parser) parseComment(b []byte) (reference, []byte, error) {
	ref := invalidReference
	data, rest, err := scanComment(b)
	if p.KeepComments && err == nil {
		ref = p.builder.Push(Node{
			Kind: Comment,
			Raw:  p.Range(data),
			Data: data,
		})
	}
	return ref, rest, err
}

// parseExpression parses one top-level expression: an optional comment, a
// key/value pair, or a table header, each optionally followed by a comment.
// It may return an invalid reference for a blank line.
func (p *Parser) parseExpression(b []byte) (reference, []byte, error) {
	// expression =  ws [ comment ]
	// expression =/ ws keyval ws [ comment ]
	// expression =/ ws table ws [ comment ]
	ref := invalidReference

	b = p.parseWhitespace(b)

	if len(b) == 0 {
		return ref, b, nil
	}

	if b[0] == '#' {
		ref, rest, err := p.parseComment(b)
		return ref, rest, err
	}

	if b[0] == '\n' || b[0] == '\r' {
		return ref, b, nil
	}

	var err error
	if b[0] == '[' {
		ref, b, err = p.parseTable(b)
	} else {
		ref, b, err = p.parseKeyval(b)
	}

	if err != nil {
		return ref, nil, err
	}

	b = p.parseWhitespace(b)

	// A trailing comment, when kept, is chained onto the expression node.
	if len(b) > 0 && b[0] == '#' {
		cref, rest, err := p.parseComment(b)
		if cref != invalidReference {
			p.builder.Chain(ref, cref)
		}
		return ref, rest, err
	}

	return ref, b, nil
}

// parseTable dispatches on "[[" vs "[" to parse an array table or a
// standard table header.
func (p *Parser) parseTable(b []byte) (reference, []byte, error) {
	// table = std-table / array-table
	if len(b) > 1 && b[1] == '[' {
		return p.parseArrayTable(b)
	}

	return p.parseStdTable(b)
}
func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { // array-table = array-table-open key array-table-close // array-table-open = %x5B.5B ws ; [[ Double left square bracket // array-table-close = ws %x5D.5D ; ]] Double right square bracket ref := p.builder.Push(Node{ Kind: ArrayTable, }) b = b[2:] b = p.parseWhitespace(b) k, b, err := p.parseKey(b) if err != nil { return ref, nil, err } p.builder.AttachChild(ref, k) b = p.parseWhitespace(b) b, err = expect(']', b) if err != nil { return ref, nil, err } b, err = expect(']', b) return ref, b, err } func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { // std-table = std-table-open key std-table-close // std-table-open = %x5B ws ; [ Left square bracket // std-table-close = ws %x5D ; ] Right square bracket ref := p.builder.Push(Node{ Kind: Table, }) b = b[1:] b = p.parseWhitespace(b) key, b, err := p.parseKey(b) if err != nil { return ref, nil, err } p.builder.AttachChild(ref, key) b = p.parseWhitespace(b) b, err = expect(']', b) return ref, b, err } func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { // keyval = key keyval-sep val ref := p.builder.Push(Node{ Kind: KeyValue, }) key, b, err := p.parseKey(b) if err != nil { return invalidReference, nil, err } // keyval-sep = ws %x3D ws ; = b = p.parseWhitespace(b) if len(b) == 0 { return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") } b, err = expect('=', b) if err != nil { return invalidReference, nil, err } b = p.parseWhitespace(b) valRef, b, err := p.parseVal(b) if err != nil { return ref, b, err } p.builder.Chain(valRef, key) p.builder.AttachChild(ref, valRef) return ref, b, err } //nolint:cyclop,funlen func (p *Parser) parseVal(b []byte) (reference, []byte, error) { // val = string / boolean / array / inline-table / date-time / float / integer ref := invalidReference if len(b) == 0 { return ref, nil, NewParserError(b, "expected value, not eof") } var err error c := 
b[0] switch c { case '"': var raw []byte var v []byte if scanFollowsMultilineBasicStringDelimiter(b) { raw, v, b, err = p.parseMultilineBasicString(b) } else { raw, v, b, err = p.parseBasicString(b) } if err == nil { ref = p.builder.Push(Node{ Kind: String, Raw: p.Range(raw), Data: v, }) } return ref, b, err case '\'': var raw []byte var v []byte if scanFollowsMultilineLiteralStringDelimiter(b) { raw, v, b, err = p.parseMultilineLiteralString(b) } else { raw, v, b, err = p.parseLiteralString(b) } if err == nil { ref = p.builder.Push(Node{ Kind: String, Raw: p.Range(raw), Data: v, }) } return ref, b, err case 't': if !scanFollowsTrue(b) { return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") } ref = p.builder.Push(Node{ Kind: Bool, Data: b[:4], }) return ref, b[4:], nil case 'f': if !scanFollowsFalse(b) { return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") } ref = p.builder.Push(Node{ Kind: Bool, Data: b[:5], }) return ref, b[5:], nil case '[': return p.parseValArray(b) case '{': return p.parseInlineTable(b) default: return p.parseIntOrFloatOrDateTime(b) } } func atmost(b []byte, n int) []byte { if n >= len(b) { return b } return b[:n] } func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { v, rest, err := scanLiteralString(b) if err != nil { return nil, nil, nil, err } return v, v[1 : len(v)-1], rest, nil } func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close // inline-table-open = %x7B ws ; { // inline-table-close = ws %x7D ; } // inline-table-sep = ws %x2C ws ; , Comma // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] parent := p.builder.Push(Node{ Kind: InlineTable, Raw: p.Range(b[:1]), }) first := true var child reference b = b[1:] var err error for len(b) > 0 { previousB := b b = p.parseWhitespace(b) if len(b) == 0 { return parent, nil, NewParserError(previousB[:1], "inline table is 
incomplete") } if b[0] == '}' { break } if !first { b, err = expect(',', b) if err != nil { return parent, nil, err } b = p.parseWhitespace(b) } var kv reference kv, b, err = p.parseKeyval(b) if err != nil { return parent, nil, err } if first { p.builder.AttachChild(parent, kv) } else { p.builder.Chain(child, kv) } child = kv first = false } rest, err := expect('}', b) return parent, rest, err } //nolint:funlen,cyclop func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { // array = array-open [ array-values ] ws-comment-newline array-close // array-open = %x5B ; [ // array-close = %x5D ; ] // array-values = ws-comment-newline val ws-comment-newline array-sep array-values // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] // array-sep = %x2C ; , Comma // ws-comment-newline = *( wschar / [ comment ] newline ) arrayStart := b b = b[1:] parent := p.builder.Push(Node{ Kind: Array, }) // First indicates whether the parser is looking for the first element // (non-comment) of the array. first := true lastChild := invalidReference addChild := func(valueRef reference) { if lastChild == invalidReference { p.builder.AttachChild(parent, valueRef) } else { p.builder.Chain(lastChild, valueRef) } lastChild = valueRef } var err error for len(b) > 0 { cref := invalidReference cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) if err != nil { return parent, nil, err } if cref != invalidReference { addChild(cref) } if len(b) == 0 { return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") } if b[0] == ']' { break } if b[0] == ',' { if first { return parent, nil, NewParserError(b[0:1], "array cannot start with comma") } b = b[1:] cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) if err != nil { return parent, nil, err } if cref != invalidReference { addChild(cref) } } else if !first { return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") } // TOML allows trailing commas in arrays. 
if len(b) > 0 && b[0] == ']' { break } var valueRef reference valueRef, b, err = p.parseVal(b) if err != nil { return parent, nil, err } addChild(valueRef) cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) if err != nil { return parent, nil, err } if cref != invalidReference { addChild(cref) } first = false } rest, err := expect(']', b) return parent, rest, err } func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { rootCommentRef := invalidReference latestCommentRef := invalidReference addComment := func(ref reference) { if rootCommentRef == invalidReference { rootCommentRef = ref } else if latestCommentRef == invalidReference { p.builder.AttachChild(rootCommentRef, ref) latestCommentRef = ref } else { p.builder.Chain(latestCommentRef, ref) latestCommentRef = ref } } for len(b) > 0 { var err error b = p.parseWhitespace(b) if len(b) > 0 && b[0] == '#' { var ref reference ref, b, err = p.parseComment(b) if err != nil { return invalidReference, nil, err } if ref != invalidReference { addComment(ref) } } if len(b) == 0 { break } if b[0] == '\n' || b[0] == '\r' { b, err = p.parseNewline(b) if err != nil { return invalidReference, nil, err } } else { break } } return rootCommentRef, b, nil } func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { token, rest, err := scanMultilineLiteralString(b) if err != nil { return nil, nil, nil, err } i := 3 // skip the immediate new line if token[i] == '\n' { i++ } else if token[i] == '\r' && token[i+1] == '\n' { i += 2 } return token, token[i : len(token)-3], rest, err } //nolint:funlen,gocognit,cyclop func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body // ml-basic-string-delim // ml-basic-string-delim = 3quotation-mark // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] // // mlb-content = mlb-char / newline / 
mlb-escaped-nl // mlb-char = mlb-unescaped / escaped // mlb-quotes = 1*2quotation-mark // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii // mlb-escaped-nl = escape ws newline *( wschar / newline ) token, escaped, rest, err := scanMultilineBasicString(b) if err != nil { return nil, nil, nil, err } i := 3 // skip the immediate new line if token[i] == '\n' { i++ } else if token[i] == '\r' && token[i+1] == '\n' { i += 2 } // fast path startIdx := i endIdx := len(token) - len(`"""`) if !escaped { str := token[startIdx:endIdx] verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } var builder bytes.Buffer // The scanner ensures that the token starts and ends with quotes and that // escapes are balanced. for i < len(token)-3 { c := token[i] //nolint:nestif if c == '\\' { // When the last non-whitespace character on a line is an unescaped \, // it will be trimmed along with all whitespace (including newlines) up // to the next non-whitespace character or closing delimiter. 
isLastNonWhitespaceOnLine := false j := 1 findEOLLoop: for ; j < len(token)-3-i; j++ { switch token[i+j] { case ' ', '\t': continue case '\r': if token[i+j+1] == '\n' { continue } case '\n': isLastNonWhitespaceOnLine = true } break findEOLLoop } if isLastNonWhitespaceOnLine { i += j for ; i < len(token)-3; i++ { c := token[i] if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { i-- break } } i++ continue } // handle escaping i++ c = token[i] switch c { case '"', '\\': builder.WriteByte(c) case 'b': builder.WriteByte('\b') case 'f': builder.WriteByte('\f') case 'n': builder.WriteByte('\n') case 'r': builder.WriteByte('\r') case 't': builder.WriteByte('\t') case 'e': builder.WriteByte(0x1B) case 'u': x, err := hexToRune(atmost(token[i+1:], 4), 4) if err != nil { return nil, nil, nil, err } builder.WriteRune(x) i += 4 case 'U': x, err := hexToRune(atmost(token[i+1:], 8), 8) if err != nil { return nil, nil, nil, err } builder.WriteRune(x) i += 8 default: return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { size := characters.Utf8ValidNext(token[i:]) if size == 0 { return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size } } return token, builder.Bytes(), rest, nil } func (p *Parser) parseKey(b []byte) (reference, []byte, error) { // key = simple-key / dotted-key // simple-key = quoted-key / unquoted-key // // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ // quoted-key = basic-string / literal-string // dotted-key = simple-key 1*( dot-sep simple-key ) // // dot-sep = ws %x2E ws ; . Period raw, key, b, err := p.parseSimpleKey(b) if err != nil { return invalidReference, nil, err } ref := p.builder.Push(Node{ Kind: Key, Raw: p.Range(raw), Data: key, }) for { b = p.parseWhitespace(b) if len(b) > 0 && b[0] == '.' 
{ b = p.parseWhitespace(b[1:]) raw, key, b, err = p.parseSimpleKey(b) if err != nil { return ref, nil, err } p.builder.PushAndChain(Node{ Kind: Key, Raw: p.Range(raw), Data: key, }) } else { break } } return ref, b, nil } func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { if len(b) == 0 { return nil, nil, nil, NewParserError(b, "expected key but found none") } // simple-key = quoted-key / unquoted-key // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ // quoted-key = basic-string / literal-string switch { case b[0] == '\'': return p.parseLiteralString(b) case b[0] == '"': return p.parseBasicString(b) case isUnquotedKeyChar(b[0]): key, rest = scanUnquotedKey(b) return key, key, rest, nil default: return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) } } //nolint:funlen,cyclop func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // basic-string = quotation-mark *basic-char quotation-mark // quotation-mark = %x22 ; " // basic-char = basic-unescaped / escaped // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii // escaped = escape escape-seq-char // escape-seq-char = %x22 ; " quotation mark U+0022 // escape-seq-char =/ %x5C ; \ reverse solidus U+005C // escape-seq-char =/ %x62 ; b backspace U+0008 // escape-seq-char =/ %x66 ; f form feed U+000C // escape-seq-char =/ %x6E ; n line feed U+000A // escape-seq-char =/ %x72 ; r carriage return U+000D // escape-seq-char =/ %x74 ; t tab U+0009 // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX token, escaped, rest, err := scanBasicString(b) if err != nil { return nil, nil, nil, err } startIdx := len(`"`) endIdx := len(token) - len(`"`) // Fast path. If there is no escape sequence, the string should just be // an UTF-8 encoded string, which is the same as Go. In that case, // validate the string and return a direct reference to the buffer. 
if !escaped { str := token[startIdx:endIdx] verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } i := startIdx var builder bytes.Buffer // The scanner ensures that the token starts and ends with quotes and that // escapes are balanced. for i < len(token)-1 { c := token[i] if c == '\\' { i++ c = token[i] switch c { case '"', '\\': builder.WriteByte(c) case 'b': builder.WriteByte('\b') case 'f': builder.WriteByte('\f') case 'n': builder.WriteByte('\n') case 'r': builder.WriteByte('\r') case 't': builder.WriteByte('\t') case 'e': builder.WriteByte(0x1B) case 'u': x, err := hexToRune(token[i+1:len(token)-1], 4) if err != nil { return nil, nil, nil, err } builder.WriteRune(x) i += 4 case 'U': x, err := hexToRune(token[i+1:len(token)-1], 8) if err != nil { return nil, nil, nil, err } builder.WriteRune(x) i += 8 default: return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { size := characters.Utf8ValidNext(token[i:]) if size == 0 { return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size } } return token, builder.Bytes(), rest, nil } func hexToRune(b []byte, length int) (rune, error) { if len(b) < length { return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) } b = b[:length] var r uint32 for i, c := range b { d := uint32(0) switch { case '0' <= c && c <= '9': d = uint32(c - '0') case 'a' <= c && c <= 'f': d = uint32(c - 'a' + 10) case 'A' <= c && c <= 'F': d = uint32(c - 'A' + 10) default: return -1, NewParserError(b[i:i+1], "non-hex character") } r = r*16 + d } if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { return -1, NewParserError(b, "escape sequence is invalid Unicode code point") } return rune(r), nil } func (p *Parser) parseWhitespace(b []byte) []byte { // ws = *wschar // 
wschar = %x20 ; Space // wschar =/ %x09 ; Horizontal tab _, rest := scanWhitespace(b) return rest } //nolint:cyclop func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { switch b[0] { case 'i': if !scanFollowsInf(b) { return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") } return p.builder.Push(Node{ Kind: Float, Data: b[:3], Raw: p.Range(b[:3]), }), b[3:], nil case 'n': if !scanFollowsNan(b) { return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") } return p.builder.Push(Node{ Kind: Float, Data: b[:3], Raw: p.Range(b[:3]), }), b[3:], nil case '+', '-': return p.scanIntOrFloat(b) } if len(b) < 3 { return p.scanIntOrFloat(b) } s := 5 if len(b) < s { s = len(b) } for idx, c := range b[:s] { if isDigit(c) { continue } if idx == 2 && c == ':' || (idx == 4 && c == '-') { return p.scanDateTime(b) } break } return p.scanIntOrFloat(b) } func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { // scans for contiguous characters in [0-9T:Z.+-], and up to one space if // followed by a digit. hasDate := false hasTime := false hasTz := false seenSpace := false i := 0 byteLoop: for ; i < len(b); i++ { c := b[i] switch { case isDigit(c): case c == '-': hasDate = true const minOffsetOfTz = 8 if i >= minOffsetOfTz { hasTz = true } case c == 'T' || c == 't' || c == ':' || c == '.': hasTime = true case c == '+' || c == '-' || c == 'Z' || c == 'z': hasTz = true case c == ' ': if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { i += 2 // Avoid reaching past the end of the document in case the time // is malformed. See TestIssue585. 
if i >= len(b) { i-- } seenSpace = true hasTime = true } else { break byteLoop } default: break byteLoop } } var kind Kind if hasTime { if hasDate { if hasTz { kind = DateTime } else { kind = LocalDateTime } } else { kind = LocalTime } } else { kind = LocalDate } return p.builder.Push(Node{ Kind: kind, Data: b[:i], }), b[i:], nil } //nolint:funlen,gocognit,cyclop func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { i := 0 if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { var isValidRune validRuneFn switch b[1] { case 'x': isValidRune = isValidHexRune case 'o': isValidRune = isValidOctalRune case 'b': isValidRune = isValidBinaryRune default: i++ } if isValidRune != nil { i += 2 for ; i < len(b); i++ { if !isValidRune(b[i]) { break } } } return p.builder.Push(Node{ Kind: Integer, Data: b[:i], Raw: p.Range(b[:i]), }), b[i:], nil } isFloat := false for ; i < len(b); i++ { c := b[i] if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { continue } if c == '.' 
|| c == 'e' || c == 'E' { isFloat = true continue } if c == 'i' { if scanFollowsInf(b[i:]) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], Raw: p.Range(b[:i+3]), }), b[i+3:], nil } return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") } if c == 'n' { if scanFollowsNan(b[i:]) { return p.builder.Push(Node{ Kind: Float, Data: b[:i+3], Raw: p.Range(b[:i+3]), }), b[i+3:], nil } return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") } break } if i == 0 { return invalidReference, b, NewParserError(b, "incomplete number") } kind := Integer if isFloat { kind = Float } return p.builder.Push(Node{ Kind: kind, Data: b[:i], Raw: p.Range(b[:i]), }), b[i:], nil } func isDigit(r byte) bool { return r >= '0' && r <= '9' } type validRuneFn func(r byte) bool func isValidHexRune(r byte) bool { return r >= 'a' && r <= 'f' || r >= 'A' && r <= 'F' || r >= '0' && r <= '9' || r == '_' } func isValidOctalRune(r byte) bool { return r >= '0' && r <= '7' || r == '_' } func isValidBinaryRune(r byte) bool { return r == '0' || r == '1' || r == '_' } func expect(x byte, b []byte) ([]byte, error) { if len(b) == 0 { return nil, NewParserError(b, "expected character %c but the document ended here", x) } if b[0] != x { return nil, NewParserError(b[0:1], "expected character %c", x) } return b[1:], nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
package unstable // root contains a full AST. // // It is immutable once constructed with Builder. type root struct { nodes []Node } // Iterator over the top level nodes. func (r *root) Iterator() Iterator { it := Iterator{} if len(r.nodes) > 0 { it.node = &r.nodes[0] } return it } func (r *root) at(idx reference) *Node { return &r.nodes[idx] } type reference int const invalidReference reference = -1 func (r reference) Valid() bool { return r != invalidReference } type builder struct { tree root lastIdx int } func (b *builder) Tree() *root { return &b.tree } func (b *builder) NodeAt(ref reference) *Node { return b.tree.at(ref) } func (b *builder) Reset() { b.tree.nodes = b.tree.nodes[:0] b.lastIdx = 0 } func (b *builder) Push(n Node) reference { b.lastIdx = len(b.tree.nodes) b.tree.nodes = append(b.tree.nodes, n) return reference(b.lastIdx) } func (b *builder) PushAndChain(n Node) reference { newIdx := len(b.tree.nodes) b.tree.nodes = append(b.tree.nodes, n) if b.lastIdx >= 0 { b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx } b.lastIdx = newIdx return reference(b.lastIdx) } func (b *builder) AttachChild(parent reference, child reference) { b.tree.nodes[parent].child = int(child) - int(parent) } func (b *builder) Chain(from reference, to reference) { b.tree.nodes[from].next = int(to) - int(from) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
vendor/github.com/pelletier/go-toml/v2/unstable/kind.go
package unstable import "fmt" // Kind represents the type of TOML structure contained in a given Node. type Kind int const ( // Meta Invalid Kind = iota Comment Key // Top level structures Table ArrayTable KeyValue // Containers values Array InlineTable // Values String Bool Float Integer LocalDate LocalTime LocalDateTime DateTime ) // String implementation of fmt.Stringer. func (k Kind) String() string { switch k { case Invalid: return "Invalid" case Comment: return "Comment" case Key: return "Key" case Table: return "Table" case ArrayTable: return "ArrayTable" case KeyValue: return "KeyValue" case Array: return "Array" case InlineTable: return "InlineTable" case String: return "String" case Bool: return "Bool" case Float: return "Float" case Integer: return "Integer" case LocalDate: return "LocalDate" case LocalTime: return "LocalTime" case LocalDateTime: return "LocalDateTime" case DateTime: return "DateTime" } panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go
package unstable import "github.com/pelletier/go-toml/v2/internal/characters" func scanFollows(b []byte, pattern string) bool { n := len(pattern) return len(b) >= n && string(b[:n]) == pattern } func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { return scanFollows(b, `"""`) } func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { return scanFollows(b, `'''`) } func scanFollowsTrue(b []byte) bool { return scanFollows(b, `true`) } func scanFollowsFalse(b []byte) bool { return scanFollows(b, `false`) } func scanFollowsInf(b []byte) bool { return scanFollows(b, `inf`) } func scanFollowsNan(b []byte) bool { return scanFollows(b, `nan`) } func scanUnquotedKey(b []byte) ([]byte, []byte) { // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ for i := 0; i < len(b); i++ { if !isUnquotedKeyChar(b[i]) { return b[:i], b[i:] } } return b, b[len(b):] } func isUnquotedKeyChar(r byte) bool { return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' } func scanLiteralString(b []byte) ([]byte, []byte, error) { // literal-string = apostrophe *literal-char apostrophe // apostrophe = %x27 ; ' apostrophe // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii for i := 1; i < len(b); { switch b[i] { case '\'': return b[:i+1], b[i+1:], nil case '\n', '\r': return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") } size := characters.Utf8ValidNext(b[i:]) if size == 0 { return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } return nil, nil, NewParserError(b[len(b):], "unterminated literal string") } func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body // ml-literal-string-delim // ml-literal-string-delim = 3apostrophe // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] // // mll-content = mll-char / newline // mll-char = %x09 / %x20-26 
/ %x28-7E / non-ascii // mll-quotes = 1*2apostrophe for i := 3; i < len(b); { switch b[i] { case '\'': if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { i += 3 // At that point we found 3 apostrophe, and i is the // index of the byte after the third one. The scanner // needs to be eager, because there can be an extra 2 // apostrophe that can be accepted at the end of the // string. if i >= len(b) || b[i] != '\'' { return b[:i], b[i:], nil } i++ if i >= len(b) || b[i] != '\'' { return b[:i], b[i:], nil } i++ if i < len(b) && b[i] == '\'' { return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") } return b[:i], b[i:], nil } case '\r': if len(b) < i+2 { return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i += 2 // skip the \n continue } size := characters.Utf8ValidNext(b[i:]) if size == 0 { return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) } func scanWindowsNewline(b []byte) ([]byte, []byte, error) { const lenCRLF = 2 if len(b) < lenCRLF { return nil, nil, NewParserError(b, "windows new line expected") } if b[1] != '\n' { return nil, nil, NewParserError(b, `windows new line should be \r\n`) } return b[:lenCRLF], b[lenCRLF:], nil } func scanWhitespace(b []byte) ([]byte, []byte) { for i := 0; i < len(b); i++ { switch b[i] { case ' ', '\t': continue default: return b[:i], b[i:] } } return b, b[len(b):] } func scanComment(b []byte) ([]byte, []byte, error) { // comment-start-symbol = %x23 ; # // non-ascii = %x80-D7FF / %xE000-10FFFF // non-eol = %x09 / %x20-7F / non-ascii // // comment = comment-start-symbol *non-eol for i := 1; i < len(b); { if b[i] == '\n' { return b[:i], b[i:], nil } if b[i] == '\r' { if i+1 < len(b) && b[i+1] == '\n' { return b[:i+1], b[i+1:], nil } return nil, nil, NewParserError(b[i:i+1], 
"invalid character in comment") } size := characters.Utf8ValidNext(b[i:]) if size == 0 { return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") } i += size } return b, b[len(b):], nil } func scanBasicString(b []byte) ([]byte, bool, []byte, error) { // basic-string = quotation-mark *basic-char quotation-mark // quotation-mark = %x22 ; " // basic-char = basic-unescaped / escaped // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii // escaped = escape escape-seq-char escaped := false i := 1 for ; i < len(b); i++ { switch b[i] { case '"': return b[:i+1], escaped, b[i+1:], nil case '\n', '\r': return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") case '\\': if len(b) < i+2 { return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") } escaped = true i++ // skip the next character } } return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) } func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body // ml-basic-string-delim // ml-basic-string-delim = 3quotation-mark // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] // // mlb-content = mlb-char / newline / mlb-escaped-nl // mlb-char = mlb-unescaped / escaped // mlb-quotes = 1*2quotation-mark // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii // mlb-escaped-nl = escape ws newline *( wschar / newline ) escaped := false i := 3 for ; i < len(b); i++ { switch b[i] { case '"': if scanFollowsMultilineBasicStringDelimiter(b[i:]) { i += 3 // At that point we found 3 apostrophe, and i is the // index of the byte after the third one. The scanner // needs to be eager, because there can be an extra 2 // apostrophe that can be accepted at the end of the // string. 
if i >= len(b) || b[i] != '"' { return b[:i], escaped, b[i:], nil } i++ if i >= len(b) || b[i] != '"' { return b[:i], escaped, b[i:], nil } i++ if i < len(b) && b[i] == '"' { return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) } return b[:i], escaped, b[i:], nil } case '\\': if len(b) < i+2 { return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") } escaped = true i++ // skip the next character case '\r': if len(b) < i+2 { return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i++ // skip the \n } } return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
vendor/github.com/pelletier/go-toml/v2/unstable/ast.go
package unstable import ( "fmt" "unsafe" "github.com/pelletier/go-toml/v2/internal/danger" ) // Iterator over a sequence of nodes. // // Starts uninitialized, you need to call Next() first. // // For example: // // it := n.Children() // for it.Next() { // n := it.Node() // // do something with n // } type Iterator struct { started bool node *Node } // Next moves the iterator forward and returns true if points to a // node, false otherwise. func (c *Iterator) Next() bool { if !c.started { c.started = true } else if c.node.Valid() { c.node = c.node.Next() } return c.node.Valid() } // IsLast returns true if the current node of the iterator is the last // one. Subsequent calls to Next() will return false. func (c *Iterator) IsLast() bool { return c.node.next == 0 } // Node returns a pointer to the node pointed at by the iterator. func (c *Iterator) Node() *Node { return c.node } // Node in a TOML expression AST. // // Depending on Kind, its sequence of children should be interpreted // differently. // // - Array have one child per element in the array. // - InlineTable have one child per key-value in the table (each of kind // InlineTable). // - KeyValue have at least two children. The first one is the value. The rest // make a potentially dotted key. // - Table and ArrayTable's children represent a dotted key (same as // KeyValue, but without the first node being the value). // // When relevant, Raw describes the range of bytes this node is referring to in // the input document. Use Parser.Raw() to retrieve the actual bytes. type Node struct { Kind Kind Raw Range // Raw bytes from the input. Data []byte // Node value (either allocated or referencing the input). // References to other nodes, as offsets in the backing array // from this node. References can go backward, so those can be // negative. next int // 0 if last element child int // 0 if no child } // Range of bytes in the document. 
type Range struct { Offset uint32 Length uint32 } // Next returns a pointer to the next node, or nil if there is no next node. func (n *Node) Next() *Node { if n.next == 0 { return nil } ptr := unsafe.Pointer(n) size := unsafe.Sizeof(Node{}) return (*Node)(danger.Stride(ptr, size, n.next)) } // Child returns a pointer to the first child node of this node. Other children // can be accessed calling Next on the first child. Returns an nil if this Node // has no child. func (n *Node) Child() *Node { if n.child == 0 { return nil } ptr := unsafe.Pointer(n) size := unsafe.Sizeof(Node{}) return (*Node)(danger.Stride(ptr, size, n.child)) } // Valid returns true if the node's kind is set (not to Invalid). func (n *Node) Valid() bool { return n != nil } // Key returns the children nodes making the Key on a supported node. Panics // otherwise. They are guaranteed to be all be of the Kind Key. A simple key // would return just one element. func (n *Node) Key() Iterator { switch n.Kind { case KeyValue: value := n.Child() if !value.Valid() { panic(fmt.Errorf("KeyValue should have at least two children")) } return Iterator{node: value.Next()} case Table, ArrayTable: return Iterator{node: n.Child()} default: panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) } } // Value returns a pointer to the value node of a KeyValue. // Guaranteed to be non-nil. Panics if not called on a KeyValue node, // or if the Children are malformed. func (n *Node) Value() *Node { return n.Child() } // Children returns an iterator over a node's children. func (n *Node) Children() Iterator { return Iterator{node: n.Child()} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
vendor/github.com/pelletier/go-toml/v2/unstable/doc.go
// Package unstable provides APIs that do not meet the backward compatibility // guarantees yet. package unstable
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go
package unstable // The Unmarshaler interface may be implemented by types to customize their // behavior when being unmarshaled from a TOML document. type Unmarshaler interface { UnmarshalTOML(value *Node) error }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go
package tracker
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go
package tracker import ( "bytes" "fmt" "sync" "github.com/pelletier/go-toml/v2/unstable" ) type keyKind uint8 const ( invalidKind keyKind = iota valueKind tableKind arrayTableKind ) func (k keyKind) String() string { switch k { case invalidKind: return "invalid" case valueKind: return "value" case tableKind: return "table" case arrayTableKind: return "array table" } panic("missing keyKind string mapping") } // SeenTracker tracks which keys have been seen with which TOML type to flag // duplicates and mismatches according to the spec. // // Each node in the visited tree is represented by an entry. Each entry has an // identifier, which is provided by a counter. Entries are stored in the array // entries. As new nodes are discovered (referenced for the first time in the // TOML document), entries are created and appended to the array. An entry // points to its parent using its id. // // To find whether a given key (sequence of []byte) has already been visited, // the entries are linearly searched, looking for one with the right name and // parent id. // // Given that all keys appear in the document after their parent, it is // guaranteed that all descendants of a node are stored after the node, this // speeds up the search process. // // When encountering [[array tables]], the descendants of that node are removed // to allow that branch of the tree to be "rediscovered". To maintain the // invariant above, the deletion process needs to keep the order of entries. // This results in more copies in that case. type SeenTracker struct { entries []entry currentIdx int } var pool = sync.Pool{ New: func() interface{} { return &SeenTracker{} }, } func (s *SeenTracker) reset() { // Always contains a root element at index 0. s.currentIdx = 0 if len(s.entries) == 0 { s.entries = make([]entry, 1, 2) } else { s.entries = s.entries[:1] } s.entries[0].child = -1 s.entries[0].next = -1 } type entry struct { // Use -1 to indicate no child or no sibling. 
child int next int name []byte kind keyKind explicit bool kv bool } // Find the index of the child of parentIdx with key k. Returns -1 if // it does not exist. func (s *SeenTracker) find(parentIdx int, k []byte) int { for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { if bytes.Equal(s.entries[i].name, k) { return i } } return -1 } // Remove all descendants of node at position idx. func (s *SeenTracker) clear(idx int) { if idx >= len(s.entries) { return } for i := s.entries[idx].child; i >= 0; { next := s.entries[i].next n := s.entries[0].next s.entries[0].next = i s.entries[i].next = n s.entries[i].name = nil s.clear(i) i = next } s.entries[idx].child = -1 } func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { e := entry{ child: -1, next: s.entries[parentIdx].child, name: name, kind: kind, explicit: explicit, kv: kv, } var idx int if s.entries[0].next >= 0 { idx = s.entries[0].next s.entries[0].next = s.entries[idx].next s.entries[idx] = e } else { idx = len(s.entries) s.entries = append(s.entries, e) } s.entries[parentIdx].child = idx return idx } func (s *SeenTracker) setExplicitFlag(parentIdx int) { for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { if s.entries[i].kv { s.entries[i].explicit = true s.entries[i].kv = false } s.setExplicitFlag(i) } } // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are // consistent. It returns true if it is the first time this node's key is seen. // Useful to clear array tables on first use. 
func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { if s.entries == nil { s.reset() } switch node.Kind { case unstable.KeyValue: return s.checkKeyValue(node) case unstable.Table: return s.checkTable(node) case unstable.ArrayTable: return s.checkArrayTable(node) default: panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) } } func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } it := node.Key() parentIdx := 0 // This code is duplicated in checkArrayTable. This is because factoring // it in a function requires to copy the iterator, or allocate it to the // heap, which is not cheap. for it.Next() { if it.IsLast() { break } k := it.Node().Data idx := s.find(parentIdx, k) if idx < 0 { idx = s.create(parentIdx, k, tableKind, false, false) } else { entry := s.entries[idx] if entry.kind == valueKind { return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx } k := it.Node().Data idx := s.find(parentIdx, k) first := false if idx >= 0 { kind := s.entries[idx].kind if kind != tableKind { return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) } if s.entries[idx].explicit { return false, fmt.Errorf("toml: table %s already exists", string(k)) } s.entries[idx].explicit = true } else { idx = s.create(parentIdx, k, tableKind, true, false) first = true } s.currentIdx = idx return first, nil } func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } it := node.Key() parentIdx := 0 for it.Next() { if it.IsLast() { break } k := it.Node().Data idx := s.find(parentIdx, k) if idx < 0 { idx = s.create(parentIdx, k, tableKind, false, false) } else { entry := s.entries[idx] if entry.kind == valueKind { return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx 
} k := it.Node().Data idx := s.find(parentIdx, k) firstTime := idx < 0 if firstTime { idx = s.create(parentIdx, k, arrayTableKind, true, false) } else { kind := s.entries[idx].kind if kind != arrayTableKind { return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) } s.clear(idx) } s.currentIdx = idx return firstTime, nil } func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { parentIdx := s.currentIdx it := node.Key() for it.Next() { k := it.Node().Data idx := s.find(parentIdx, k) if idx < 0 { idx = s.create(parentIdx, k, tableKind, false, true) } else { entry := s.entries[idx] if it.IsLast() { return false, fmt.Errorf("toml: key %s is already defined", string(k)) } else if entry.kind != tableKind { return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } else if entry.explicit { return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) } } parentIdx = idx } s.entries[parentIdx].kind = valueKind value := node.Value() switch value.Kind { case unstable.InlineTable: return s.checkInlineTable(value) case unstable.Array: return s.checkArray(value) } return false, nil } func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { case unstable.InlineTable: first, err = s.checkInlineTable(n) if err != nil { return false, err } case unstable.Array: first, err = s.checkArray(n) if err != nil { return false, err } } } return first, nil } func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { s = pool.Get().(*SeenTracker) s.reset() it := node.Children() for it.Next() { n := it.Node() first, err = s.checkKeyValue(n) if err != nil { return false, err } } // As inline tables are self-contained, the tracker does not // need to retain the details of what they contain. 
The // keyValue element that creates the inline table is kept to // mark the presence of the inline table and prevent // redefinition of its keys: check* functions cannot walk into // a value. pool.Put(s) return first, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go
package tracker import "github.com/pelletier/go-toml/v2/unstable" // KeyTracker is a tracker that keeps track of the current Key as the AST is // walked. type KeyTracker struct { k []string } // UpdateTable sets the state of the tracker with the AST table node. func (t *KeyTracker) UpdateTable(node *unstable.Node) { t.reset() t.Push(node) } // UpdateArrayTable sets the state of the tracker with the AST array table node. func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { t.reset() t.Push(node) } // Push the given key on the stack. func (t *KeyTracker) Push(node *unstable.Node) { it := node.Key() for it.Next() { t.k = append(t.k, string(it.Node().Data)) } } // Pop key from stack. func (t *KeyTracker) Pop(node *unstable.Node) { it := node.Key() for it.Next() { t.k = t.k[:len(t.k)-1] } } // Key returns the current key func (t *KeyTracker) Key() []string { k := make([]string, len(t.k)) copy(k, t.k) return k } func (t *KeyTracker) reset() { t.k = t.k[:0] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go
package danger import ( "reflect" "unsafe" ) // typeID is used as key in encoder and decoder caches to enable using // the optimize runtime.mapaccess2_fast64 function instead of the more // expensive lookup if we were to use reflect.Type as map key. // // typeID holds the pointer to the reflect.Type value, which is unique // in the program. // // https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 type TypeID unsafe.Pointer func MakeTypeID(t reflect.Type) TypeID { // reflect.Type has the fields: // typ unsafe.Pointer // ptr unsafe.Pointer return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go
package danger import ( "fmt" "reflect" "unsafe" ) const maxInt = uintptr(int(^uint(0) >> 1)) func SubsliceOffset(data []byte, subslice []byte) int { datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) if hlp.Data < datap.Data { panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) } offset := hlp.Data - datap.Data if offset > maxInt { panic(fmt.Errorf("slice offset larger than int (%d)", offset)) } intoffset := int(offset) if intoffset > datap.Len { panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) } if intoffset+hlp.Len > datap.Len { panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) } return intoffset } func BytesRange(start []byte, end []byte) []byte { if start == nil || end == nil { panic("cannot call BytesRange with nil") } startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) if startp.Data > endp.Data { panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) } l := startp.Len endLen := int(endp.Data-startp.Data) + endp.Len if endLen > l { l = endLen } if l > startp.Cap { panic(fmt.Errorf("range length is larger than capacity")) } return start[:l] } func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { // TODO: replace with unsafe.Add when Go 1.17 is released // https://github.com/golang/go/issues/40481 return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go
package characters import ( "unicode/utf8" ) type utf8Err struct { Index int Size int } func (u utf8Err) Zero() bool { return u.Size == 0 } // Verified that a given string is only made of valid UTF-8 characters allowed // by the TOML spec: // // Any Unicode character may be used except those that must be escaped: // quotation mark, backslash, and the control characters other than tab (U+0000 // to U+0008, U+000A to U+001F, U+007F). // // It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early // when a character is not allowed. // // The returned utf8Err is Zero() if the string is valid, or contains the byte // index and size of the invalid character. // // quotation mark => already checked // backslash => already checked // 0-0x8 => invalid // 0x9 => tab, ok // 0xA - 0x1F => invalid // 0x7F => invalid func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. offset := 0 for len(p) >= 8 { // Combining two 32 bit loads allows the same code to be used // for 32 and 64 bit platforms. // The compiler can generate a 32bit load for first32 and second32 // on many platforms. See test/codegen/memcombine.go. first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 if (first32|second32)&0x80808080 != 0 { // Found a non ASCII byte (>= RuneSelf). break } for i, b := range p[:8] { if InvalidAscii(b) { err.Index = offset + i err.Size = 1 return } } p = p[8:] offset += 8 } n := len(p) for i := 0; i < n; { pi := p[i] if pi < utf8.RuneSelf { if InvalidAscii(pi) { err.Index = offset + i err.Size = 1 return } i++ continue } x := first[pi] if x == xx { // Illegal starter byte. err.Index = offset + i err.Size = 1 return } size := int(x & 7) if i+size > n { // Short or invalid. 
err.Index = offset + i err.Size = n - i return } accept := acceptRanges[x>>4] if c := p[i+1]; c < accept.lo || accept.hi < c { err.Index = offset + i err.Size = 2 return } else if size == 2 { } else if c := p[i+2]; c < locb || hicb < c { err.Index = offset + i err.Size = 3 return } else if size == 3 { } else if c := p[i+3]; c < locb || hicb < c { err.Index = offset + i err.Size = 4 return } i += size } return } // Return the size of the next rune if valid, 0 otherwise. func Utf8ValidNext(p []byte) int { c := p[0] if c < utf8.RuneSelf { if InvalidAscii(c) { return 0 } return 1 } x := first[c] if x == xx { // Illegal starter byte. return 0 } size := int(x & 7) if size > len(p) { // Short or invalid. return 0 } accept := acceptRanges[x>>4] if c := p[1]; c < accept.lo || accept.hi < c { return 0 } else if size == 2 { } else if c := p[2]; c < locb || hicb < c { return 0 } else if size == 3 { } else if c := p[3]; c < locb || hicb < c { return 0 } return size } // acceptRange gives the range of valid values for the second byte in a UTF-8 // sequence. type acceptRange struct { lo uint8 // lowest value for second byte. hi uint8 // highest value for second byte. } // acceptRanges has size 16 to avoid bounds checks in the code that uses it. var acceptRanges = [16]acceptRange{ 0: {locb, hicb}, 1: {0xA0, hicb}, 2: {locb, 0x9F}, 3: {0x90, hicb}, 4: {locb, 0x8F}, } // first is information about the first byte in a UTF-8 sequence. 
var first = [256]uint8{ // 1 2 3 4 5 6 7 8 9 A B C D E F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F // 1 2 3 4 5 6 7 8 9 A B C D E F xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF } const ( // The default lowest and highest continuation byte. locb = 0b10000000 hicb = 0b10111111 // These names of these constants are chosen to give nice alignment in the // table below. The first nibble is an index into acceptRanges or F for // special one-byte cases. The second nibble is the Rune length or the // Status for the special one-byte case. xx = 0xF1 // invalid: size 1 as = 0xF0 // ASCII: size 1 s1 = 0x02 // accept 0, size 2 s2 = 0x13 // accept 1, size 3 s3 = 0x03 // accept 0, size 3 s4 = 0x23 // accept 2, size 3 s5 = 0x34 // accept 3, size 4 s6 = 0x04 // accept 0, size 4 s7 = 0x44 // accept 4, size 4 )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go
package characters var invalidAsciiTable = [256]bool{ 0x00: true, 0x01: true, 0x02: true, 0x03: true, 0x04: true, 0x05: true, 0x06: true, 0x07: true, 0x08: true, // 0x09 TAB // 0x0A LF 0x0B: true, 0x0C: true, // 0x0D CR 0x0E: true, 0x0F: true, 0x10: true, 0x11: true, 0x12: true, 0x13: true, 0x14: true, 0x15: true, 0x16: true, 0x17: true, 0x18: true, 0x19: true, 0x1A: true, 0x1B: true, 0x1C: true, 0x1D: true, 0x1E: true, 0x1F: true, // 0x20 - 0x7E Printable ASCII characters 0x7F: true, } func InvalidAscii(b byte) bool { return invalidAsciiTable[b] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
//go:build appengine // +build appengine // This file contains the safe implementations of otherwise unsafe-using code. package xxhash // Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } // WriteString adds more data to d. It always returns len(s), nil. func (d *Digest) WriteString(s string) (n int, err error) { return d.Write([]byte(s)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. // xxhash_safe.go contains the safe implementations. package xxhash import ( "unsafe" ) // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as // Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. // NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: // // var b []byte // bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) // bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data // bh.Len = len(s) // bh.Cap = len(s) // // Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough // weight to this sequence of expressions that any function that uses it will // not be inlined. Instead, the functions below use a different unsafe // conversion designed to minimize the inliner weight and allow both to be // inlined. There is also a test (TestInlining) which verifies that these are // inlined. // // See https://github.com/golang/go/issues/42739 for discussion. // Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) return Sum64(b) } // WriteString adds more data to d. It always returns len(s), nil. // It may be faster than Write([]byte(s)) by avoiding a copy. func (d *Digest) WriteString(s string) (n int, err error) { d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) // d.Write always returns len(s), nil. // Ignoring the return output and returning these fixed values buys a // savings of 6 in the inliner's cost model. 
return len(s), nil } // sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout // of the first two words is the same as the layout of a string. type sliceHeader struct { s string cap int }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/cespare/xxhash/v2/xxhash.go
vendor/github.com/cespare/xxhash/v2/xxhash.go
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described // at http://cyan4973.github.io/xxHash/. package xxhash import ( "encoding/binary" "errors" "math/bits" ) const ( prime1 uint64 = 11400714785074694791 prime2 uint64 = 14029467366897019727 prime3 uint64 = 1609587929392839161 prime4 uint64 = 9650029242287828579 prime5 uint64 = 2870177450012600261 ) // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a // contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. // // Note that a zero-valued Digest is not ready to receive writes. // Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 v3 uint64 v4 uint64 total uint64 mem [32]byte n int // how much of mem is used } // New creates a new Digest with a zero seed. func New() *Digest { return NewWithSeed(0) } // NewWithSeed creates a new Digest with the given seed. func NewWithSeed(seed uint64) *Digest { var d Digest d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. // It uses a seed value of zero. func (d *Digest) Reset() { d.ResetWithSeed(0) } // ResetWithSeed clears the Digest's state so that it can be reused. // It uses the given seed to initialize the state. func (d *Digest) ResetWithSeed(seed uint64) { d.v1 = seed + prime1 + prime2 d.v2 = seed + prime2 d.v3 = seed d.v4 = seed - prime1 d.total = 0 d.n = 0 } // Size always returns 8 bytes. func (d *Digest) Size() int { return 8 } // BlockSize always returns 32 bytes. func (d *Digest) BlockSize() int { return 32 } // Write adds more data to d. It always returns len(b), nil. func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) memleft := d.mem[d.n&(len(d.mem)-1):] if d.n+n < 32 { // This new data doesn't even fill the current block. 
copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) b = b[c:] d.n = 0 } if len(b) >= 32 { // One or more full blocks left. nw := writeBlocks(d, b) b = b[nw:] } // Store any remaining partial block. copy(d.mem[:], b) d.n = len(b) return } // Sum appends the current hash to b and returns the resulting slice. func (d *Digest) Sum(b []byte) []byte { s := d.Sum64() return append( b, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s), ) } // Sum64 returns the current hash. func (d *Digest) Sum64() uint64 { var h uint64 if d.total >= 32 { v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) h = mergeRound(h, v1) h = mergeRound(h, v2) h = mergeRound(h, v3) h = mergeRound(h, v4) } else { h = d.v3 + prime5 } h += d.total b := d.mem[:d.n&(len(d.mem)-1)] for ; len(b) >= 8; b = b[8:] { k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } if len(b) >= 4 { h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 b = b[4:] } for ; len(b) > 0; b = b[1:] { h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } h ^= h >> 33 h *= prime2 h ^= h >> 29 h *= prime3 h ^= h >> 32 return h } const ( magic = "xxh\x06" marshaledSize = len(magic) + 8*5 + 32 ) // MarshalBinary implements the encoding.BinaryMarshaler interface. func (d *Digest) MarshalBinary() ([]byte, error) { b := make([]byte, 0, marshaledSize) b = append(b, magic...) b = appendUint64(b, d.v1) b = appendUint64(b, d.v2) b = appendUint64(b, d.v3) b = appendUint64(b, d.v4) b = appendUint64(b, d.total) b = append(b, d.mem[:d.n]...) b = b[:len(b)+len(d.mem)-d.n] return b, nil } // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
func (d *Digest) UnmarshalBinary(b []byte) error { if len(b) < len(magic) || string(b[:len(magic)]) != magic { return errors.New("xxhash: invalid hash state identifier") } if len(b) != marshaledSize { return errors.New("xxhash: invalid hash state size") } b = b[len(magic):] b, d.v1 = consumeUint64(b) b, d.v2 = consumeUint64(b) b, d.v3 = consumeUint64(b) b, d.v4 = consumeUint64(b) b, d.total = consumeUint64(b) copy(d.mem[:], b) d.n = int(d.total % uint64(len(d.mem))) return nil } func appendUint64(b []byte, x uint64) []byte { var a [8]byte binary.LittleEndian.PutUint64(a[:], x) return append(b, a[:]...) } func consumeUint64(b []byte) ([]byte, uint64) { x := u64(b) return b[8:], x } func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } func round(acc, input uint64) uint64 { acc += input * prime2 acc = rol31(acc) acc *= prime1 return acc } func mergeRound(acc, val uint64) uint64 { val = round(0, val) acc ^= val acc = acc*prime1 + prime4 return acc } func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
// This is the pure-Go fallback used when the assembly implementation
// (amd64/arm64) is unavailable or disabled.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Four-lane accumulator initialization per the XXH64 spec
		// (seed 0); primes is the package-level prime table.
		v1 := primes[0] + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -primes[0]
		for len(b) >= 32 {
			// Full-capacity slice expressions keep append/aliasing honest
			// and help bounds-check elimination.
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}

		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Inputs shorter than one block skip the lane phase entirely.
		h = prime5
	}

	h += uint64(n)

	// Finalize the remaining 0..31 bytes in 8-, 4-, then 1-byte steps.
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		b = b[4:]
	}
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}

	// Avalanche.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

// writeBlocks consumes as many full 32-byte blocks from b as possible,
// updating the digest's lane accumulators, and returns the number of
// bytes consumed.
func writeBlocks(d *Digest, b []byte) int {
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Declarations for the assembly implementations (xxhash_amd64.s /
// xxhash_arm64.s). These mirror the pure-Go fallback in xxhash_other.go
// and must stay signature-identical to it.

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64

// writeBlocks consumes full 32-byte blocks from b into d's accumulators
// and returns the number of bytes consumed.
//
//go:noescape
func writeBlocks(d *Digest, b []byte) int
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/json.go
vendor/github.com/gorilla/websocket/json.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package websocket import ( "encoding/json" "io" ) // WriteJSON writes the JSON encoding of v as a message. // // Deprecated: Use c.WriteJSON instead. func WriteJSON(c *Conn, v interface{}) error { return c.WriteJSON(v) } // WriteJSON writes the JSON encoding of v as a message. // // See the documentation for encoding/json Marshal for details about the // conversion of Go values to JSON. func (c *Conn) WriteJSON(v interface{}) error { w, err := c.NextWriter(TextMessage) if err != nil { return err } err1 := json.NewEncoder(w).Encode(v) err2 := w.Close() if err1 != nil { return err1 } return err2 } // ReadJSON reads the next JSON-encoded message from the connection and stores // it in the value pointed to by v. // // Deprecated: Use c.ReadJSON instead. func ReadJSON(c *Conn, v interface{}) error { return c.ReadJSON(v) } // ReadJSON reads the next JSON-encoded message from the connection and stores // it in the value pointed to by v. // // See the documentation for the encoding/json Unmarshal function for details // about the conversion of JSON to a Go value. func (c *Conn) ReadJSON(v interface{}) error { _, r, err := c.NextReader() if err != nil { return err } err = json.NewDecoder(r).Decode(v) if err == io.EOF { // One value is expected in the message. err = io.ErrUnexpectedEOF } return err }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/proxy.go
vendor/github.com/gorilla/websocket/proxy.go
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package websocket import ( "bufio" "encoding/base64" "errors" "net" "net/http" "net/url" "strings" ) type netDialerFunc func(network, addr string) (net.Conn, error) func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { return fn(network, addr) } func init() { proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } type httpProxyDialer struct { proxyURL *url.URL forwardDial func(network, addr string) (net.Conn, error) } func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { hostPort, _ := hostPortNoPort(hpd.proxyURL) conn, err := hpd.forwardDial(network, hostPort) if err != nil { return nil, err } connectHeader := make(http.Header) if user := hpd.proxyURL.User; user != nil { proxyUser := user.Username() if proxyPassword, passwordSet := user.Password(); passwordSet { credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) connectHeader.Set("Proxy-Authorization", "Basic "+credential) } } connectReq := &http.Request{ Method: http.MethodConnect, URL: &url.URL{Opaque: addr}, Host: addr, Header: connectHeader, } if err := connectReq.Write(conn); err != nil { conn.Close() return nil, err } // Read response. It's OK to use and discard buffered reader here becaue // the remote server does not speak until spoken to. br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { conn.Close() return nil, err } if resp.StatusCode != 200 { conn.Close() f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } return conn, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/mask.go
vendor/github.com/gorilla/websocket/mask.go
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

//go:build !appengine
// +build !appengine

package websocket

import "unsafe"

// wordSize is the native machine word size in bytes.
const wordSize = int(unsafe.Sizeof(uintptr(0)))

// maskBytes XORs b in place with the 4-byte WebSocket masking key, starting
// at key offset pos, and returns the key offset to use for the next call.
// For buffers of at least two words it masks word-at-a-time via unsafe
// pointer arithmetic; behavior is identical to the byte-at-a-time fallback
// in mask_safe.go.
func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary, so the word loop below
	// only ever touches aligned addresses.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key by repeating the 4-byte key at the
	// current rotation.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes. Note: pos was not
	// advanced by the word loop, but wordSize is a multiple of 4, so the
	// key rotation is unchanged by whole words.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/client.go
vendor/github.com/gorilla/websocket/client.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptrace"
	"net/url"
	"strings"
	"time"
)

// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")

var errInvalidCompression = errors.New("websocket: invalid compression negotiation")

// NewClient creates a new client connection using the given net connection.
// The URL u specifies the host and request URI. Use requestHeader to specify
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
// (Cookie). Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etc.
//
// Deprecated: Use Dialer instead.
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
	d := Dialer{
		ReadBufferSize:  readBufSize,
		WriteBufferSize: writeBufSize,
		// The dial function ignores network/addr and hands back the
		// caller-supplied connection.
		NetDial: func(net, addr string) (net.Conn, error) {
			return netConn, nil
		},
	}
	return d.Dial(u.String(), requestHeader)
}

// A Dialer contains options for connecting to WebSocket server.
//
// It is safe to call Dialer's methods concurrently.
type Dialer struct {
	// NetDial specifies the dial function for creating TCP connections. If
	// NetDial is nil, net.Dial is used.
	NetDial func(network, addr string) (net.Conn, error)

	// NetDialContext specifies the dial function for creating TCP connections. If
	// NetDialContext is nil, NetDial is used.
	NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)

	// NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If
	// NetDialTLSContext is nil, NetDialContext is used.
	// If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and
	// TLSClientConfig is ignored.
	NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error)

	// Proxy specifies a function to return a proxy for a given
	// Request. If the function returns a non-nil error, the
	// request is aborted with the provided error.
	// If Proxy is nil or returns a nil *URL, no proxy is used.
	Proxy func(*http.Request) (*url.URL, error)

	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
	// If nil, the default configuration is used.
	// If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake
	// is done there and TLSClientConfig is ignored.
	TLSClientConfig *tls.Config

	// HandshakeTimeout specifies the duration for the handshake to complete.
	HandshakeTimeout time.Duration

	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
	// size is zero, then a useful default size is used. The I/O buffer sizes
	// do not limit the size of the messages that can be sent or received.
	ReadBufferSize, WriteBufferSize int

	// WriteBufferPool is a pool of buffers for write operations. If the value
	// is not set, then write buffers are allocated to the connection for the
	// lifetime of the connection.
	//
	// A pool is most useful when the application has a modest volume of writes
	// across a large number of connections.
	//
	// Applications should use a single pool for each unique value of
	// WriteBufferSize.
	WriteBufferPool BufferPool

	// Subprotocols specifies the client's requested subprotocols.
	Subprotocols []string

	// EnableCompression specifies if the client should attempt to negotiate
	// per message compression (RFC 7692). Setting this value to true does not
	// guarantee that compression will be supported. Currently only "no context
	// takeover" modes are supported.
	EnableCompression bool

	// Jar specifies the cookie jar.
	// If Jar is nil, cookies are not sent in requests and ignored
	// in responses.
	Jar http.CookieJar
}

// Dial creates a new client connection by calling DialContext with a background context.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
	return d.DialContext(context.Background(), urlStr, requestHeader)
}

var errMalformedURL = errors.New("malformed ws or wss URL")

// hostPortNoPort returns u's host both with a port (adding the scheme
// default when absent) and without one. The "]" comparison keeps IPv6
// literals like [::1]:80 from being truncated at the wrong colon.
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
	hostPort = u.Host
	hostNoPort = u.Host
	if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
		hostNoPort = hostNoPort[:i]
	} else {
		switch u.Scheme {
		case "wss":
			hostPort += ":443"
		case "https":
			hostPort += ":443"
		default:
			hostPort += ":80"
		}
	}
	return hostPort, hostNoPort
}

// DefaultDialer is a dialer with all fields set to the default values.
var DefaultDialer = &Dialer{
	Proxy:            http.ProxyFromEnvironment,
	HandshakeTimeout: 45 * time.Second,
}

// nilDialer is dialer to use when receiver is nil.
var nilDialer = *DefaultDialer

// DialContext creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// The context will be used in the request and in the Dialer.
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
	if d == nil {
		d = &nilDialer
	}

	challengeKey, err := generateChallengeKey()
	if err != nil {
		return nil, nil, err
	}

	u, err := url.Parse(urlStr)
	if err != nil {
		return nil, nil, err
	}

	// Map ws/wss to the equivalent HTTP schemes for the handshake request.
	switch u.Scheme {
	case "ws":
		u.Scheme = "http"
	case "wss":
		u.Scheme = "https"
	default:
		return nil, nil, errMalformedURL
	}

	if u.User != nil {
		// User name and password are not allowed in websocket URIs.
		return nil, nil, errMalformedURL
	}

	req := &http.Request{
		Method:     http.MethodGet,
		URL:        u,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       u.Host,
	}
	req = req.WithContext(ctx)

	// Set the cookies present in the cookie jar of the dialer
	if d.Jar != nil {
		for _, cookie := range d.Jar.Cookies(u) {
			req.AddCookie(cookie)
		}
	}

	// Set the request headers using the capitalization for names and values in
	// RFC examples. Although the capitalization shouldn't matter, there are
	// servers that depend on it. The Header.Set method is not used because the
	// method canonicalizes the header names.
	req.Header["Upgrade"] = []string{"websocket"}
	req.Header["Connection"] = []string{"Upgrade"}
	req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
	req.Header["Sec-WebSocket-Version"] = []string{"13"}
	if len(d.Subprotocols) > 0 {
		req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
	}
	for k, vs := range requestHeader {
		switch {
		case k == "Host":
			if len(vs) > 0 {
				req.Host = vs[0]
			}
		case k == "Upgrade" ||
			k == "Connection" ||
			k == "Sec-Websocket-Key" ||
			k == "Sec-Websocket-Version" ||
			k == "Sec-Websocket-Extensions" ||
			(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
			// Handshake-controlled headers may not be overridden by callers.
			return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
		case k == "Sec-Websocket-Protocol":
			req.Header["Sec-WebSocket-Protocol"] = vs
		default:
			req.Header[k] = vs
		}
	}

	if d.EnableCompression {
		req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
	}

	if d.HandshakeTimeout != 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
		defer cancel()
	}

	// Get network dial function.
	var netDial func(network, add string) (net.Conn, error)

	switch u.Scheme {
	case "http":
		if d.NetDialContext != nil {
			netDial = func(network, addr string) (net.Conn, error) {
				return d.NetDialContext(ctx, network, addr)
			}
		} else if d.NetDial != nil {
			netDial = d.NetDial
		}
	case "https":
		if d.NetDialTLSContext != nil {
			netDial = func(network, addr string) (net.Conn, error) {
				return d.NetDialTLSContext(ctx, network, addr)
			}
		} else if d.NetDialContext != nil {
			netDial = func(network, addr string) (net.Conn, error) {
				return d.NetDialContext(ctx, network, addr)
			}
		} else if d.NetDial != nil {
			netDial = d.NetDial
		}
	default:
		return nil, nil, errMalformedURL
	}

	if netDial == nil {
		netDialer := &net.Dialer{}
		netDial = func(network, addr string) (net.Conn, error) {
			return netDialer.DialContext(ctx, network, addr)
		}
	}

	// If needed, wrap the dial function to set the connection deadline.
	if deadline, ok := ctx.Deadline(); ok {
		forwardDial := netDial
		netDial = func(network, addr string) (net.Conn, error) {
			c, err := forwardDial(network, addr)
			if err != nil {
				return nil, err
			}
			err = c.SetDeadline(deadline)
			if err != nil {
				c.Close()
				return nil, err
			}
			return c, nil
		}
	}

	// If needed, wrap the dial function to connect through a proxy.
	if d.Proxy != nil {
		proxyURL, err := d.Proxy(req)
		if err != nil {
			return nil, nil, err
		}
		if proxyURL != nil {
			dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
			if err != nil {
				return nil, nil, err
			}
			netDial = dialer.Dial
		}
	}

	hostPort, hostNoPort := hostPortNoPort(u)
	trace := httptrace.ContextClientTrace(ctx)
	if trace != nil && trace.GetConn != nil {
		trace.GetConn(hostPort)
	}

	netConn, err := netDial("tcp", hostPort)
	if err != nil {
		return nil, nil, err
	}
	if trace != nil && trace.GotConn != nil {
		trace.GotConn(httptrace.GotConnInfo{
			Conn: netConn,
		})
	}

	// netConn is set to nil before a successful return; the defer closes
	// it on every error path below.
	defer func() {
		if netConn != nil {
			netConn.Close()
		}
	}()

	if u.Scheme == "https" && d.NetDialTLSContext == nil {
		// If NetDialTLSContext is set, assume that the TLS handshake has already been done

		cfg := cloneTLSConfig(d.TLSClientConfig)
		if cfg.ServerName == "" {
			cfg.ServerName = hostNoPort
		}
		tlsConn := tls.Client(netConn, cfg)
		netConn = tlsConn

		if trace != nil && trace.TLSHandshakeStart != nil {
			trace.TLSHandshakeStart()
		}
		err := doHandshake(ctx, tlsConn, cfg)
		if trace != nil && trace.TLSHandshakeDone != nil {
			trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
		}

		if err != nil {
			return nil, nil, err
		}
	}

	conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)

	if err := req.Write(netConn); err != nil {
		return nil, nil, err
	}

	if trace != nil && trace.GotFirstResponseByte != nil {
		if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
			trace.GotFirstResponseByte()
		}
	}

	resp, err := http.ReadResponse(conn.br, req)
	if err != nil {
		if d.TLSClientConfig != nil {
			// A tls.Config shared with net/http may negotiate h2, which
			// cannot carry this WebSocket handshake; surface a hint.
			for _, proto := range d.TLSClientConfig.NextProtos {
				if proto != "http/1.1" {
					return nil, nil, fmt.Errorf(
						"websocket: protocol %q was given but is not supported;"+
							"sharing tls.Config with net/http Transport can cause this error: %w",
						proto, err,
					)
				}
			}
		}
		return nil, nil, err
	}

	if d.Jar != nil {
		if rc := resp.Cookies(); len(rc) > 0 {
			d.Jar.SetCookies(u, rc)
		}
	}

	if resp.StatusCode != 101 ||
		!tokenListContainsValue(resp.Header, "Upgrade", "websocket") ||
		!tokenListContainsValue(resp.Header, "Connection", "upgrade") ||
		resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
		// Before closing the network connection on return from this
		// function, slurp up some of the response to aid application
		// debugging.
		buf := make([]byte, 1024)
		n, _ := io.ReadFull(resp.Body, buf)
		resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
		return nil, resp, ErrBadHandshake
	}

	// Enable permessage-deflate only when both no-context-takeover
	// parameters were accepted by the server.
	for _, ext := range parseExtensions(resp.Header) {
		if ext[""] != "permessage-deflate" {
			continue
		}
		_, snct := ext["server_no_context_takeover"]
		_, cnct := ext["client_no_context_takeover"]
		if !snct || !cnct {
			return nil, resp, errInvalidCompression
		}
		conn.newCompressionWriter = compressNoContextTakeover
		conn.newDecompressionReader = decompressNoContextTakeover
		break
	}

	resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
	conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")

	netConn.SetDeadline(time.Time{})
	netConn = nil // to avoid close in defer.
	return conn, resp, nil
}

// cloneTLSConfig returns a copy of cfg that is safe to mutate, or a fresh
// zero config when cfg is nil.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/join.go
vendor/github.com/gorilla/websocket/join.go
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package websocket import ( "io" "strings" ) // JoinMessages concatenates received messages to create a single io.Reader. // The string term is appended to each message. The returned reader does not // support concurrent calls to the Read method. func JoinMessages(c *Conn, term string) io.Reader { return &joinReader{c: c, term: term} } type joinReader struct { c *Conn term string r io.Reader } func (r *joinReader) Read(p []byte) (int, error) { if r.r == nil { var err error _, r.r, err = r.c.NextReader() if err != nil { return 0, err } if r.term != "" { r.r = io.MultiReader(r.r, strings.NewReader(r.term)) } } n, err := r.r.Read(p) if err == io.EOF { err = nil r.r = nil } return n, err }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/util.go
vendor/github.com/gorilla/websocket/util.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package websocket import ( "crypto/rand" "crypto/sha1" "encoding/base64" "io" "net/http" "strings" "unicode/utf8" ) var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { h := sha1.New() h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) } func generateChallengeKey() (string, error) { p := make([]byte, 16) if _, err := io.ReadFull(rand.Reader, p); err != nil { return "", err } return base64.StdEncoding.EncodeToString(p), nil } // Token octets per RFC 2616. var isTokenOctet = [256]bool{ '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, '-': true, '.': true, '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'W': true, 'V': true, 'X': true, 'Y': true, 'Z': true, '^': true, '_': true, '`': true, 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, 'y': true, 'z': true, '|': true, '~': true, } // skipSpace returns a slice of the string s with all leading RFC 2616 linear // whitespace removed. func skipSpace(s string) (rest string) { i := 0 for ; i < len(s); i++ { if b := s[i]; b != ' ' && b != '\t' { break } } return s[i:] } // nextToken returns the leading RFC 2616 token of s and the string following // the token. 
// nextToken scans the longest prefix of token octets (per isTokenOctet) and
// returns it plus the unconsumed remainder.
func nextToken(s string) (token, rest string) {
	i := 0
	for ; i < len(s); i++ {
		if !isTokenOctet[s[i]] {
			break
		}
	}
	return s[:i], s[i:]
}

// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
// and the string following the token or quoted string.
// An unterminated quoted string yields ("", "").
func nextTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return nextToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			// Fast path: quoted string with no escapes.
			return s[:i], s[i+1:]
		case '\\':
			// Slow path: build the unescaped value in p, starting from the
			// bytes already scanned.
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			return "", ""
		}
	}
	return "", ""
}

// equalASCIIFold returns true if s is equal to t with ASCII case folding as
// defined in RFC 4790. Non-ASCII runes must match exactly.
func equalASCIIFold(s, t string) bool {
	for s != "" && t != "" {
		sr, size := utf8.DecodeRuneInString(s)
		s = s[size:]
		tr, size := utf8.DecodeRuneInString(t)
		t = t[size:]
		if sr == tr {
			continue
		}
		if 'A' <= sr && sr <= 'Z' {
			sr = sr + 'a' - 'A'
		}
		if 'A' <= tr && tr <= 'Z' {
			tr = tr + 'a' - 'A'
		}
		if sr != tr {
			return false
		}
	}
	// Equal only if both strings were consumed in full.
	return s == t
}

// tokenListContainsValue returns true if the 1#token header with the given
// name contains a token equal to value with ASCII case folding.
// Malformed list items cause the rest of that header value to be skipped.
func tokenListContainsValue(header http.Header, name string, value string) bool {
headers:
	for _, s := range header[name] {
		for {
			var t string
			t, s = nextToken(skipSpace(s))
			if t == "" {
				continue headers
			}
			s = skipSpace(s)
			if s != "" && s[0] != ',' {
				continue headers
			}
			if equalASCIIFold(t, value) {
				return true
			}
			if s == "" {
				continue headers
			}
			s = s[1:]
		}
	}
	return false
}

// parseExtensions parses WebSocket extensions from a header.
// parseExtensions parses the Sec-WebSocket-Extensions header values into a
// list of maps; each map's "" key holds the extension token and the other
// keys hold its parameters (empty string when a parameter has no value).
// A malformed item aborts parsing of the rest of that header value.
func parseExtensions(header http.Header) []map[string]string {
	// From RFC 6455:
	//
	//  Sec-WebSocket-Extensions = extension-list
	//  extension-list = 1#extension
	//  extension = extension-token *( ";" extension-param )
	//  extension-token = registered-token
	//  registered-token = token
	//  extension-param = token [ "=" (token | quoted-string) ]
	//     ;When using the quoted-string syntax variant, the value
	//     ;after quoted-string unescaping MUST conform to the
	//     ;'token' ABNF.
	var result []map[string]string
headers:
	for _, s := range header["Sec-Websocket-Extensions"] {
		for {
			var t string
			t, s = nextToken(skipSpace(s))
			if t == "" {
				continue headers
			}
			// The extension name itself is stored under the empty key.
			ext := map[string]string{"": t}
			for {
				s = skipSpace(s)
				if !strings.HasPrefix(s, ";") {
					break
				}
				var k string
				k, s = nextToken(skipSpace(s[1:]))
				if k == "" {
					continue headers
				}
				s = skipSpace(s)
				var v string
				if strings.HasPrefix(s, "=") {
					v, s = nextTokenOrQuoted(skipSpace(s[1:]))
					s = skipSpace(s)
				}
				if s != "" && s[0] != ',' && s[0] != ';' {
					continue headers
				}
				ext[k] = v
			}
			if s != "" && s[0] != ',' {
				continue headers
			}
			result = append(result, ext)
			if s == "" {
				continue headers
			}
			s = s[1:]
		}
	}
	return result
}

// isValidChallengeKey checks if the argument meets RFC6455 specification.
func isValidChallengeKey(s string) bool {
	// From RFC6455:
	//
	// A |Sec-WebSocket-Key| header field with a base64-encoded (see
	// Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in
	// length.

	if s == "" {
		return false
	}
	decoded, err := base64.StdEncoding.DecodeString(s)
	return err == nil && len(decoded) == 16
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/mask_safe.go
vendor/github.com/gorilla/websocket/mask_safe.go
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of // this source code is governed by a BSD-style license that can be found in the // LICENSE file. //go:build appengine // +build appengine package websocket func maskBytes(key [4]byte, pos int, b []byte) int { for i := range b { b[i] ^= key[pos&3] pos++ } return pos & 3 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/conn.go
vendor/github.com/gorilla/websocket/conn.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bufio"
	"encoding/binary"
	"errors"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode/utf8"
)

const (
	// Frame header byte 0 bits from Section 5.2 of RFC 6455
	finalBit = 1 << 7
	rsv1Bit  = 1 << 6
	rsv2Bit  = 1 << 5
	rsv3Bit  = 1 << 4

	// Frame header byte 1 bits from Section 5.2 of RFC 6455
	maskBit = 1 << 7

	maxFrameHeaderSize         = 2 + 8 + 4 // Fixed header + length + mask
	maxControlFramePayloadSize = 125

	// writeWait is the deadline applied to control-frame writes.
	writeWait = time.Second

	defaultReadBufferSize  = 4096
	defaultWriteBufferSize = 4096

	continuationFrame = 0
	noFrame           = -1
)

// Close codes defined in RFC 6455, section 11.7.
const (
	CloseNormalClosure           = 1000
	CloseGoingAway               = 1001
	CloseProtocolError           = 1002
	CloseUnsupportedData         = 1003
	CloseNoStatusReceived        = 1005
	CloseAbnormalClosure         = 1006
	CloseInvalidFramePayloadData = 1007
	ClosePolicyViolation         = 1008
	CloseMessageTooBig           = 1009
	CloseMandatoryExtension      = 1010
	CloseInternalServerErr       = 1011
	CloseServiceRestart          = 1012
	CloseTryAgainLater           = 1013
	CloseTLSHandshake            = 1015
)

// The message types are defined in RFC 6455, section 11.8.
const (
	// TextMessage denotes a text data message. The text message payload is
	// interpreted as UTF-8 encoded text data.
	TextMessage = 1

	// BinaryMessage denotes a binary data message.
	BinaryMessage = 2

	// CloseMessage denotes a close control message. The optional message
	// payload contains a numeric code and text. Use the FormatCloseMessage
	// function to format a close message payload.
	CloseMessage = 8

	// PingMessage denotes a ping control message. The optional message payload
	// is UTF-8 encoded text.
	PingMessage = 9

	// PongMessage denotes a pong control message. The optional message payload
	// is UTF-8 encoded text.
	PongMessage = 10
)

// ErrCloseSent is returned when the application writes a message to the
// connection after sending a close message.
var ErrCloseSent = errors.New("websocket: close sent")

// ErrReadLimit is returned when reading a message that is larger than the
// read limit set for the connection.
var ErrReadLimit = errors.New("websocket: read limit exceeded")

// netError satisfies the net Error interface.
type netError struct {
	msg       string
	temporary bool
	timeout   bool
}

func (e *netError) Error() string   { return e.msg }
func (e *netError) Temporary() bool { return e.temporary }
func (e *netError) Timeout() bool   { return e.timeout }

// CloseError represents a close message.
type CloseError struct {
	// Code is defined in RFC 6455, section 11.7.
	Code int

	// Text is the optional text payload.
	Text string
}

// Error renders the close code, a human-readable label for well-known codes,
// and the optional payload text.
func (e *CloseError) Error() string {
	s := []byte("websocket: close ")
	s = strconv.AppendInt(s, int64(e.Code), 10)
	switch e.Code {
	case CloseNormalClosure:
		s = append(s, " (normal)"...)
	case CloseGoingAway:
		s = append(s, " (going away)"...)
	case CloseProtocolError:
		s = append(s, " (protocol error)"...)
	case CloseUnsupportedData:
		s = append(s, " (unsupported data)"...)
	case CloseNoStatusReceived:
		s = append(s, " (no status)"...)
	case CloseAbnormalClosure:
		s = append(s, " (abnormal closure)"...)
	case CloseInvalidFramePayloadData:
		s = append(s, " (invalid payload data)"...)
	case ClosePolicyViolation:
		s = append(s, " (policy violation)"...)
	case CloseMessageTooBig:
		s = append(s, " (message too big)"...)
	case CloseMandatoryExtension:
		s = append(s, " (mandatory extension missing)"...)
	case CloseInternalServerErr:
		s = append(s, " (internal server error)"...)
	case CloseTLSHandshake:
		s = append(s, " (TLS handshake error)"...)
	}
	if e.Text != "" {
		s = append(s, ": "...)
		s = append(s, e.Text...)
	}
	return string(s)
}

// IsCloseError returns boolean indicating whether the error is a *CloseError
// with one of the specified codes.
func IsCloseError(err error, codes ...int) bool {
	if e, ok := err.(*CloseError); ok {
		for _, code := range codes {
			if e.Code == code {
				return true
			}
		}
	}
	return false
}

// IsUnexpectedCloseError returns boolean indicating whether the error is a
// *CloseError with a code not in the list of expected codes.
func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
	if e, ok := err.(*CloseError); ok {
		for _, code := range expectedCodes {
			if e.Code == code {
				return false
			}
		}
		return true
	}
	return false
}

var (
	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
	errBadWriteOpCode      = errors.New("websocket: bad write message type")
	errWriteClosed         = errors.New("websocket: write closed")
	errInvalidControlFrame = errors.New("websocket: invalid control frame")
)

// newMaskKey returns a random 4-byte frame masking key. math/rand is
// sufficient here; the mask is an anti-cache-poisoning measure, not a
// secrecy mechanism (RFC 6455 section 10.3).
func newMaskKey() [4]byte {
	n := rand.Uint32()
	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
}

// hideTempErr wraps a temporary net.Error so callers do not retry it as-is.
func hideTempErr(err error) error {
	if e, ok := err.(net.Error); ok && e.Temporary() {
		err = &netError{msg: e.Error(), timeout: e.Timeout()}
	}
	return err
}

// isControl reports whether frameType is a control frame opcode.
func isControl(frameType int) bool {
	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
}

// isData reports whether frameType is a data frame opcode.
func isData(frameType int) bool {
	return frameType == TextMessage || frameType == BinaryMessage
}

// validReceivedCloseCodes lists which close codes a peer may legally send
// on the wire; codes mapped to false are reserved for local use only.
var validReceivedCloseCodes = map[int]bool{
	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number

	CloseNormalClosure:           true,
	CloseGoingAway:               true,
	CloseProtocolError:           true,
	CloseUnsupportedData:         true,
	CloseNoStatusReceived:        false,
	CloseAbnormalClosure:         false,
	CloseInvalidFramePayloadData: true,
	ClosePolicyViolation:         true,
	CloseMessageTooBig:           true,
	CloseMandatoryExtension:      true,
	CloseInternalServerErr:       true,
	CloseServiceRestart:          true,
	CloseTryAgainLater:           true,
	CloseTLSHandshake:            false,
}

// isValidReceivedCloseCode accepts the registered receivable codes plus the
// private-use range 3000-4999.
func isValidReceivedCloseCode(code int) bool {
	return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
}

// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this
// interface.  The type of the value stored in a pool is not specified.
type BufferPool interface {
	// Get gets a value from the pool or returns nil if the pool is empty.
	Get() interface{}
	// Put adds a value to the pool.
	Put(interface{})
}

// writePoolData is the type added to the write buffer pool. This wrapper is
// used to prevent applications from peeking at and depending on the values
// added to the pool.
type writePoolData struct{ buf []byte }

// The Conn type represents a WebSocket connection.
// NOTE(review): the struct declaration continues beyond the visible range;
// the remaining fields and the closing brace are outside this chunk.
type Conn struct {
	conn        net.Conn
	isServer    bool
	subprotocol string

	// Write fields
	mu            chan struct{} // used as mutex to protect write to conn
	writeBuf      []byte        // frame is constructed in this buffer.
	writePool     BufferPool
	writeBufSize  int
	writeDeadline time.Time
	writer        io.WriteCloser // the current writer returned to the application
	isWriting     bool           // for best-effort concurrent write detection

	writeErrMu sync.Mutex
	writeErr   error

	enableWriteCompression bool
	compressionLevel       int
	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser

	// Read fields
	reader  io.ReadCloser // the current reader returned to the application
	readErr error
	br      *bufio.Reader
	// bytes remaining in current frame.
	// set setReadRemaining to safely update this value and prevent overflow
	readRemaining int64
	readFinal     bool  // true the current message has more frames.
	readLength    int64 // Message size.
	readLimit     int64 // Maximum message size.
readMaskPos int readMaskKey [4]byte handlePong func(string) error handlePing func(string) error handleClose func(int, string) error readErrCount int messageReader *messageReader // the current low-level reader readDecompress bool // whether last read frame had RSV1 set newDecompressionReader func(io.Reader) io.ReadCloser } func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { if br == nil { if readBufferSize == 0 { readBufferSize = defaultReadBufferSize } else if readBufferSize < maxControlFramePayloadSize { // must be large enough for control frame readBufferSize = maxControlFramePayloadSize } br = bufio.NewReaderSize(conn, readBufferSize) } if writeBufferSize <= 0 { writeBufferSize = defaultWriteBufferSize } writeBufferSize += maxFrameHeaderSize if writeBuf == nil && writeBufferPool == nil { writeBuf = make([]byte, writeBufferSize) } mu := make(chan struct{}, 1) mu <- struct{}{} c := &Conn{ isServer: isServer, br: br, conn: conn, mu: mu, readFinal: true, writeBuf: writeBuf, writePool: writeBufferPool, writeBufSize: writeBufferSize, enableWriteCompression: true, compressionLevel: defaultCompressionLevel, } c.SetCloseHandler(nil) c.SetPingHandler(nil) c.SetPongHandler(nil) return c } // setReadRemaining tracks the number of bytes remaining on the connection. If n // overflows, an ErrReadLimit is returned. func (c *Conn) setReadRemaining(n int64) error { if n < 0 { return ErrReadLimit } c.readRemaining = n return nil } // Subprotocol returns the negotiated protocol for the connection. func (c *Conn) Subprotocol() string { return c.subprotocol } // Close closes the underlying network connection without sending or waiting // for a close message. func (c *Conn) Close() error { return c.conn.Close() } // LocalAddr returns the local network address. func (c *Conn) LocalAddr() net.Addr { return c.conn.LocalAddr() } // RemoteAddr returns the remote network address. 
func (c *Conn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() } // Write methods func (c *Conn) writeFatal(err error) error { err = hideTempErr(err) c.writeErrMu.Lock() if c.writeErr == nil { c.writeErr = err } c.writeErrMu.Unlock() return err } func (c *Conn) read(n int) ([]byte, error) { p, err := c.br.Peek(n) if err == io.EOF { err = errUnexpectedEOF } c.br.Discard(len(p)) return p, err } func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { <-c.mu defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr c.writeErrMu.Unlock() if err != nil { return err } c.conn.SetWriteDeadline(deadline) if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { err = c.writeBufs(buf0, buf1) } if err != nil { return c.writeFatal(err) } if frameType == CloseMessage { c.writeFatal(ErrCloseSent) } return nil } func (c *Conn) writeBufs(bufs ...[]byte) error { b := net.Buffers(bufs) _, err := b.WriteTo(c.conn) return err } // WriteControl writes a control message with the given deadline. The allowed // message types are CloseMessage, PingMessage and PongMessage. func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { if !isControl(messageType) { return errBadWriteOpCode } if len(data) > maxControlFramePayloadSize { return errInvalidControlFrame } b0 := byte(messageType) | finalBit b1 := byte(len(data)) if !c.isServer { b1 |= maskBit } buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) buf = append(buf, b0, b1) if c.isServer { buf = append(buf, data...) } else { key := newMaskKey() buf = append(buf, key[:]...) buf = append(buf, data...) 
maskBytes(key, 0, buf[6:]) } d := 1000 * time.Hour if !deadline.IsZero() { d = deadline.Sub(time.Now()) if d < 0 { return errWriteTimeout } } timer := time.NewTimer(d) select { case <-c.mu: timer.Stop() case <-timer.C: return errWriteTimeout } defer func() { c.mu <- struct{}{} }() c.writeErrMu.Lock() err := c.writeErr c.writeErrMu.Unlock() if err != nil { return err } c.conn.SetWriteDeadline(deadline) _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { c.writeFatal(ErrCloseSent) } return err } // beginMessage prepares a connection and message writer for a new message. func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // Close previous writer if not already closed by the application. It's // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { c.writer.Close() c.writer = nil } if !isControl(messageType) && !isData(messageType) { return errBadWriteOpCode } c.writeErrMu.Lock() err := c.writeErr c.writeErrMu.Unlock() if err != nil { return err } mw.c = c mw.frameType = messageType mw.pos = maxFrameHeaderSize if c.writeBuf == nil { wpd, ok := c.writePool.Get().(writePoolData) if ok { c.writeBuf = wpd.buf } else { c.writeBuf = make([]byte, c.writeBufSize) } } return nil } // NextWriter returns a writer for the next message to send. The writer's Close // method flushes the complete message to the network. // // There can be at most one open writer on a connection. NextWriter closes the // previous writer if the application has not already done so. // // All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and // PongMessage) are supported. 
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { var mw messageWriter if err := c.beginMessage(&mw, messageType); err != nil { return nil, err } c.writer = &mw if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { w := c.newCompressionWriter(c.writer, c.compressionLevel) mw.compress = true c.writer = w } return c.writer, nil } type messageWriter struct { c *Conn compress bool // whether next call to flushFrame should set RSV1 pos int // end of data in writeBuf. frameType int // type of the current frame. err error } func (w *messageWriter) endMessage(err error) error { if w.err != nil { return err } c := w.c w.err = err c.writer = nil if c.writePool != nil { c.writePool.Put(writePoolData{buf: c.writeBuf}) c.writeBuf = nil } return err } // flushFrame writes buffered data and extra as a frame to the network. The // final argument indicates that this is the last frame in the message. func (w *messageWriter) flushFrame(final bool, extra []byte) error { c := w.c length := w.pos - maxFrameHeaderSize + len(extra) // Check for invalid control frames. if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { return w.endMessage(errInvalidControlFrame) } b0 := byte(w.frameType) if final { b0 |= finalBit } if w.compress { b0 |= rsv1Bit } w.compress = false b1 := byte(0) if !c.isServer { b1 |= maskBit } // Assume that the frame starts at beginning of c.writeBuf. framePos := 0 if c.isServer { // Adjust up if mask not included in the header. 
framePos = 4 } switch { case length >= 65536: c.writeBuf[framePos] = b0 c.writeBuf[framePos+1] = b1 | 127 binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) case length > 125: framePos += 6 c.writeBuf[framePos] = b0 c.writeBuf[framePos+1] = b1 | 126 binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) default: framePos += 8 c.writeBuf[framePos] = b0 c.writeBuf[framePos+1] = b1 | byte(length) } if !c.isServer { key := newMaskKey() copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) } } // Write the buffers to the connection with best-effort detection of // concurrent writes. See the concurrency section in the package // documentation for more info. if c.isWriting { panic("concurrent write to websocket connection") } c.isWriting = true err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) if !c.isWriting { panic("concurrent write to websocket connection") } c.isWriting = false if err != nil { return w.endMessage(err) } if final { w.endMessage(errWriteClosed) return nil } // Setup for next frame. w.pos = maxFrameHeaderSize w.frameType = continuationFrame return nil } func (w *messageWriter) ncopy(max int) (int, error) { n := len(w.c.writeBuf) - w.pos if n <= 0 { if err := w.flushFrame(false, nil); err != nil { return 0, err } n = len(w.c.writeBuf) - w.pos } if n > max { n = max } return n, nil } func (w *messageWriter) Write(p []byte) (int, error) { if w.err != nil { return 0, w.err } if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { // Don't buffer large messages. 
err := w.flushFrame(false, p) if err != nil { return 0, err } return len(p), nil } nn := len(p) for len(p) > 0 { n, err := w.ncopy(len(p)) if err != nil { return 0, err } copy(w.c.writeBuf[w.pos:], p[:n]) w.pos += n p = p[n:] } return nn, nil } func (w *messageWriter) WriteString(p string) (int, error) { if w.err != nil { return 0, w.err } nn := len(p) for len(p) > 0 { n, err := w.ncopy(len(p)) if err != nil { return 0, err } copy(w.c.writeBuf[w.pos:], p[:n]) w.pos += n p = p[n:] } return nn, nil } func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { if w.err != nil { return 0, w.err } for { if w.pos == len(w.c.writeBuf) { err = w.flushFrame(false, nil) if err != nil { break } } var n int n, err = r.Read(w.c.writeBuf[w.pos:]) w.pos += n nn += int64(n) if err != nil { if err == io.EOF { err = nil } break } } return nn, err } func (w *messageWriter) Close() error { if w.err != nil { return w.err } return w.flushFrame(true, nil) } // WritePreparedMessage writes prepared message into connection. func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { frameType, frameData, err := pm.frame(prepareKey{ isServer: c.isServer, compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), compressionLevel: c.compressionLevel, }) if err != nil { return err } if c.isWriting { panic("concurrent write to websocket connection") } c.isWriting = true err = c.write(frameType, c.writeDeadline, frameData, nil) if !c.isWriting { panic("concurrent write to websocket connection") } c.isWriting = false return err } // WriteMessage is a helper method for getting a writer using NextWriter, // writing the message and closing the writer. func (c *Conn) WriteMessage(messageType int, data []byte) error { if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { // Fast path with no allocations and single frame. 
var mw messageWriter if err := c.beginMessage(&mw, messageType); err != nil { return err } n := copy(c.writeBuf[mw.pos:], data) mw.pos += n data = data[n:] return mw.flushFrame(true, data) } w, err := c.NextWriter(messageType) if err != nil { return err } if _, err = w.Write(data); err != nil { return err } return w.Close() } // SetWriteDeadline sets the write deadline on the underlying network // connection. After a write has timed out, the websocket state is corrupt and // all future writes will return an error. A zero value for t means writes will // not time out. func (c *Conn) SetWriteDeadline(t time.Time) error { c.writeDeadline = t return nil } // Read methods func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. if c.readRemaining > 0 { if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } // 2. Read and parse first two bytes of frame header. // To aid debugging, collect and report all errors in the first two bytes // of the header. 
var errors []string p, err := c.read(2) if err != nil { return noFrame, err } frameType := int(p[0] & 0xf) final := p[0]&finalBit != 0 rsv1 := p[0]&rsv1Bit != 0 rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 c.setReadRemaining(int64(p[1] & 0x7f)) c.readDecompress = false if rsv1 { if c.newDecompressionReader != nil { c.readDecompress = true } else { errors = append(errors, "RSV1 set") } } if rsv2 { errors = append(errors, "RSV2 set") } if rsv3 { errors = append(errors, "RSV3 set") } switch frameType { case CloseMessage, PingMessage, PongMessage: if c.readRemaining > maxControlFramePayloadSize { errors = append(errors, "len > 125 for control") } if !final { errors = append(errors, "FIN not set on control") } case TextMessage, BinaryMessage: if !c.readFinal { errors = append(errors, "data before FIN") } c.readFinal = final case continuationFrame: if c.readFinal { errors = append(errors, "continuation after FIN") } c.readFinal = final default: errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) } if mask != c.isServer { errors = append(errors, "bad MASK") } if len(errors) > 0 { return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) } // 3. Read and parse frame length as per // https://tools.ietf.org/html/rfc6455#section-5.2 // // The length of the "Payload data", in bytes: if 0-125, that is the payload // length. // - If 126, the following 2 bytes interpreted as a 16-bit unsigned // integer are the payload length. // - If 127, the following 8 bytes interpreted as // a 64-bit unsigned integer (the most significant bit MUST be 0) are the // payload length. Multibyte length quantities are expressed in network byte // order. 
switch c.readRemaining { case 126: p, err := c.read(2) if err != nil { return noFrame, err } if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { return noFrame, err } case 127: p, err := c.read(8) if err != nil { return noFrame, err } if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { return noFrame, err } } // 4. Handle frame masking. if mask { c.readMaskPos = 0 p, err := c.read(len(c.readMaskKey)) if err != nil { return noFrame, err } copy(c.readMaskKey[:], p) } // 5. For text and binary messages, enforce read limit and return. if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { c.readLength += c.readRemaining // Don't allow readLength to overflow in the presence of a large readRemaining // counter. if c.readLength < 0 { return noFrame, ErrReadLimit } if c.readLimit > 0 && c.readLength > c.readLimit { c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) return noFrame, ErrReadLimit } return frameType, nil } // 6. Read control frame payload. var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) c.setReadRemaining(0) if err != nil { return noFrame, err } if c.isServer { maskBytes(c.readMaskKey, 0, payload) } } // 7. Process control frame payload. 
switch frameType { case PongMessage: if err := c.handlePong(string(payload)); err != nil { return noFrame, err } case PingMessage: if err := c.handlePing(string(payload)); err != nil { return noFrame, err } case CloseMessage: closeCode := CloseNoStatusReceived closeText := "" if len(payload) >= 2 { closeCode = int(binary.BigEndian.Uint16(payload)) if !isValidReceivedCloseCode(closeCode) { return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) } closeText = string(payload[2:]) if !utf8.ValidString(closeText) { return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") } } if err := c.handleClose(closeCode, closeText); err != nil { return noFrame, err } return noFrame, &CloseError{Code: closeCode, Text: closeText} } return frameType, nil } func (c *Conn) handleProtocolError(message string) error { data := FormatCloseMessage(CloseProtocolError, message) if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) return errors.New("websocket: " + message) } // NextReader returns the next data message received from the peer. The // returned messageType is either TextMessage or BinaryMessage. // // There can be at most one open reader on a connection. NextReader discards // the previous message if the application has not already consumed it. // // Applications must break out of the application's read loop when this method // returns a non-nil error value. Errors returned from this method are // permanent. Once this method returns a non-nil error, all subsequent calls to // this method return the same error. func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. 
if c.reader != nil { c.reader.Close() c.reader = nil } c.messageReader = nil c.readLength = 0 for c.readErr == nil { frameType, err := c.advanceFrame() if err != nil { c.readErr = hideTempErr(err) break } if frameType == TextMessage || frameType == BinaryMessage { c.messageReader = &messageReader{c} c.reader = c.messageReader if c.readDecompress { c.reader = c.newDecompressionReader(c.reader) } return frameType, c.reader, nil } } // Applications that do handle the error returned from this method spin in // tight loop on connection failure. To help application developers detect // this error, panic on repeated reads to the failed connection. c.readErrCount++ if c.readErrCount >= 1000 { panic("repeated read on failed websocket connection") } return noFrame, nil, c.readErr } type messageReader struct{ c *Conn } func (r *messageReader) Read(b []byte) (int, error) { c := r.c if c.messageReader != r { return 0, io.EOF } for c.readErr == nil { if c.readRemaining > 0 { if int64(len(b)) > c.readRemaining { b = b[:c.readRemaining] } n, err := c.br.Read(b) c.readErr = hideTempErr(err) if c.isServer { c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } rem := c.readRemaining rem -= int64(n) c.setReadRemaining(rem) if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } return n, c.readErr } if c.readFinal { c.messageReader = nil return 0, io.EOF } frameType, err := c.advanceFrame() switch { case err != nil: c.readErr = hideTempErr(err) case frameType == TextMessage || frameType == BinaryMessage: c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") } } err := c.readErr if err == io.EOF && c.messageReader == r { err = errUnexpectedEOF } return 0, err } func (r *messageReader) Close() error { return nil } // ReadMessage is a helper method for getting a reader using NextReader and // reading from that reader to a buffer. 
func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { var r io.Reader messageType, r, err = c.NextReader() if err != nil { return messageType, nil, err } p, err = ioutil.ReadAll(r) return messageType, p, err } // SetReadDeadline sets the read deadline on the underlying network connection. // After a read has timed out, the websocket connection state is corrupt and // all future reads will return an error. A zero value for t means reads will // not time out. func (c *Conn) SetReadDeadline(t time.Time) error { return c.conn.SetReadDeadline(t) } // SetReadLimit sets the maximum size in bytes for a message read from the peer. If a // message exceeds the limit, the connection sends a close message to the peer // and returns ErrReadLimit to the application. func (c *Conn) SetReadLimit(limit int64) { c.readLimit = limit } // CloseHandler returns the current close handler func (c *Conn) CloseHandler() func(code int, text string) error { return c.handleClose } // SetCloseHandler sets the handler for close messages received from the peer. // The code argument to h is the received close code or CloseNoStatusReceived // if the close message is empty. The default close handler sends a close // message back to the peer. // // The handler function is called from the NextReader, ReadMessage and message // reader Read methods. The application must read the connection to process // close messages as described in the section on Control Messages above. // // The connection read methods return a CloseError when a close message is // received. Most applications should handle close messages as part of their // normal error handling. Applications should only set a close handler when the // application must perform some action before sending a close message back to // the peer. 
func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) return nil } } c.handleClose = h } // PingHandler returns the current ping handler func (c *Conn) PingHandler() func(appData string) error { return c.handlePing } // SetPingHandler sets the handler for ping messages received from the peer. // The appData argument to h is the PING message application data. The default // ping handler sends a pong to the peer. // // The handler function is called from the NextReader, ReadMessage and message // reader Read methods. The application must read the connection to process // ping messages as described in the section on Control Messages above. func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil } else if e, ok := err.(net.Error); ok && e.Temporary() { return nil } return err } } c.handlePing = h } // PongHandler returns the current pong handler func (c *Conn) PongHandler() func(appData string) error { return c.handlePong } // SetPongHandler sets the handler for pong messages received from the peer. // The appData argument to h is the PONG message application data. The default // pong handler does nothing. // // The handler function is called from the NextReader, ReadMessage and message // reader Read methods. The application must read the connection to process // pong messages as described in the section on Control Messages above. func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } } c.handlePong = h } // NetConn returns the underlying connection that is wrapped by c. 
// Note that writing to or reading from this connection directly will corrupt the // WebSocket connection. func (c *Conn) NetConn() net.Conn { return c.conn } // UnderlyingConn returns the internal net.Conn. This can be used to further // modifications to connection specific flags. // Deprecated: Use the NetConn method. func (c *Conn) UnderlyingConn() net.Conn { return c.conn } // EnableWriteCompression enables and disables write compression of // subsequent text and binary messages. This function is a noop if // compression was not negotiated with the peer. func (c *Conn) EnableWriteCompression(enable bool) { c.enableWriteCompression = enable } // SetCompressionLevel sets the flate compression level for subsequent text and // binary messages. This function is a noop if compression was not negotiated // with the peer. See the compress/flate package for a description of // compression levels. func (c *Conn) SetCompressionLevel(level int) error { if !isValidCompressionLevel(level) { return errors.New("websocket: invalid compression level") } c.compressionLevel = level return nil } // FormatCloseMessage formats closeCode and text as a WebSocket close message. // An empty message is returned for code CloseNoStatusReceived. func FormatCloseMessage(closeCode int, text string) []byte { if closeCode == CloseNoStatusReceived { // Return empty message because it's illegal to send
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/tls_handshake.go
vendor/github.com/gorilla/websocket/tls_handshake.go
//go:build go1.17 // +build go1.17 package websocket import ( "context" "crypto/tls" ) func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { if err := tlsConn.HandshakeContext(ctx); err != nil { return err } if !cfg.InsecureSkipVerify { if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { return err } } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/x_net_proxy.go
vendor/github.com/gorilla/websocket/x_net_proxy.go
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. //go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy // Package proxy provides support for a variety of protocols to proxy network // data. // package websocket import ( "errors" "io" "net" "net/url" "os" "strconv" "strings" "sync" ) type proxy_direct struct{} // Direct is a direct proxy: one that makes network connections directly. var proxy_Direct = proxy_direct{} func (proxy_direct) Dial(network, addr string) (net.Conn, error) { return net.Dial(network, addr) } // A PerHost directs connections to a default Dialer unless the host name // requested matches one of a number of exceptions. type proxy_PerHost struct { def, bypass proxy_Dialer bypassNetworks []*net.IPNet bypassIPs []net.IP bypassZones []string bypassHosts []string } // NewPerHost returns a PerHost Dialer that directs connections to either // defaultDialer or bypass, depending on whether the connection matches one of // the configured rules. func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { return &proxy_PerHost{ def: defaultDialer, bypass: bypass, } } // Dial connects to the address addr on the given network through either // defaultDialer or bypass. func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err } return p.dialerForRequest(host).Dial(network, addr) } func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { if ip := net.ParseIP(host); ip != nil { for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass } } for _, bypassIP := range p.bypassIPs { if bypassIP.Equal(ip) { return p.bypass } } return p.def } for _, zone := range p.bypassZones { if strings.HasSuffix(host, zone) { return p.bypass } if host == zone[1:] { // For a zone ".example.com", we match "example.com" // too. 
return p.bypass } } for _, bypassHost := range p.bypassHosts { if bypassHost == host { return p.bypass } } return p.def } // AddFromString parses a string that contains comma-separated values // specifying hosts that should use the bypass proxy. Each value is either an // IP address, a CIDR range, a zone (*.example.com) or a host name // (localhost). A best effort is made to parse the string and errors are // ignored. func (p *proxy_PerHost) AddFromString(s string) { hosts := strings.Split(s, ",") for _, host := range hosts { host = strings.TrimSpace(host) if len(host) == 0 { continue } if strings.Contains(host, "/") { // We assume that it's a CIDR address like 127.0.0.0/8 if _, net, err := net.ParseCIDR(host); err == nil { p.AddNetwork(net) } continue } if ip := net.ParseIP(host); ip != nil { p.AddIP(ip) continue } if strings.HasPrefix(host, "*.") { p.AddZone(host[1:]) continue } p.AddHost(host) } } // AddIP specifies an IP address that will use the bypass proxy. Note that // this will only take effect if a literal IP address is dialed. A connection // to a named host will never match an IP. func (p *proxy_PerHost) AddIP(ip net.IP) { p.bypassIPs = append(p.bypassIPs, ip) } // AddNetwork specifies an IP range that will use the bypass proxy. Note that // this will only take effect if a literal IP address is dialed. A connection // to a named host will never match. func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { p.bypassNetworks = append(p.bypassNetworks, net) } // AddZone specifies a DNS suffix that will use the bypass proxy. A zone of // "example.com" matches "example.com" and all of its subdomains. func (p *proxy_PerHost) AddZone(zone string) { if strings.HasSuffix(zone, ".") { zone = zone[:len(zone)-1] } if !strings.HasPrefix(zone, ".") { zone = "." + zone } p.bypassZones = append(p.bypassZones, zone) } // AddHost specifies a host name that will use the bypass proxy. 
func (p *proxy_PerHost) AddHost(host string) { if strings.HasSuffix(host, ".") { host = host[:len(host)-1] } p.bypassHosts = append(p.bypassHosts, host) } // A Dialer is a means to establish a connection. type proxy_Dialer interface { // Dial connects to the given address via the proxy. Dial(network, addr string) (c net.Conn, err error) } // Auth contains authentication parameters that specific Dialers may require. type proxy_Auth struct { User, Password string } // FromEnvironment returns the dialer specified by the proxy related variables in // the environment. func proxy_FromEnvironment() proxy_Dialer { allProxy := proxy_allProxyEnv.Get() if len(allProxy) == 0 { return proxy_Direct } proxyURL, err := url.Parse(allProxy) if err != nil { return proxy_Direct } proxy, err := proxy_FromURL(proxyURL, proxy_Direct) if err != nil { return proxy_Direct } noProxy := proxy_noProxyEnv.Get() if len(noProxy) == 0 { return proxy } perHost := proxy_NewPerHost(proxy, proxy_Direct) perHost.AddFromString(noProxy) return perHost } // proxySchemes is a map from URL schemes to a function that creates a Dialer // from a URL with such a scheme. var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) // RegisterDialerType takes a URL scheme and a function to generate Dialers from // a URL with that scheme and a forwarding Dialer. Registered schemes are used // by FromURL. func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { if proxy_proxySchemes == nil { proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) } proxy_proxySchemes[scheme] = f } // FromURL returns a Dialer given a URL specification and an underlying // Dialer for it to make network requests. 
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
	// Extract optional user:password credentials embedded in the URL.
	var auth *proxy_Auth
	if u.User != nil {
		auth = new(proxy_Auth)
		auth.User = u.User.Username()
		if p, ok := u.User.Password(); ok {
			auth.Password = p
		}
	}

	switch u.Scheme {
	case "socks5":
		return proxy_SOCKS5("tcp", u.Host, auth, forward)
	}

	// If the scheme doesn't match any of the built-in schemes, see if it
	// was registered by another package.
	if proxy_proxySchemes != nil {
		if f, ok := proxy_proxySchemes[u.Scheme]; ok {
			return f(u, forward)
		}
	}

	return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
}

var (
	proxy_allProxyEnv = &proxy_envOnce{
		names: []string{"ALL_PROXY", "all_proxy"},
	}
	proxy_noProxyEnv = &proxy_envOnce{
		names: []string{"NO_PROXY", "no_proxy"},
	}
)

// envOnce looks up an environment variable (optionally by multiple
// names) once. It mitigates expensive lookups on some platforms
// (e.g. Windows).
// (Borrowed from net/http/transport.go)
type proxy_envOnce struct {
	names []string
	once  sync.Once
	val   string
}

// Get returns the cached value; the environment lookup runs only once.
func (e *proxy_envOnce) Get() string {
	e.once.Do(e.init)
	return e.val
}

// init records the first non-empty value among the candidate variable names.
func (e *proxy_envOnce) init() {
	for _, n := range e.names {
		e.val = os.Getenv(n)
		if e.val != "" {
			return
		}
	}
}

// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
// with an optional username and password. See RFC 1928 and RFC 1929.
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
	s := &proxy_socks5{
		network: network,
		addr:    addr,
		forward: forward,
	}
	if auth != nil {
		s.user = auth.User
		s.password = auth.Password
	}

	return s, nil
}

// socks5 is a Dialer that speaks the SOCKSv5 protocol to a proxy server.
type proxy_socks5 struct {
	user, password string
	network, addr  string
	forward        proxy_Dialer
}

const proxy_socks5Version = 5

// Authentication method codes (RFC 1928, section 3).
const (
	proxy_socks5AuthNone     = 0
	proxy_socks5AuthPassword = 2
)

// Command code: establish a TCP/IP stream connection.
const proxy_socks5Connect = 1

// Address type codes (RFC 1928, section 5).
const (
	proxy_socks5IP4    = 1
	proxy_socks5Domain = 3
	proxy_socks5IP6    = 4
)

// Reply field messages (RFC 1928, section 6), indexed by the server's code;
// index 0 ("") means success.
var proxy_socks5Errors = []string{
	"",
	"general failure",
	"connection forbidden",
	"network unreachable",
	"host unreachable",
	"connection refused",
	"TTL expired",
	"command not supported",
	"address type not supported",
}

// Dial connects to the address addr on the given network via the SOCKS5 proxy.
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
	switch network {
	case "tcp", "tcp6", "tcp4":
	default:
		return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
	}

	conn, err := s.forward.Dial(s.network, s.addr)
	if err != nil {
		return nil, err
	}
	if err := s.connect(conn, addr); err != nil {
		// Never leak the forwarded connection on a failed handshake.
		conn.Close()
		return nil, err
	}
	return conn, nil
}

// connect takes an existing connection to a socks5 proxy server,
// and commands the server to extend that connection to target,
// which must be a canonical address with a host and port.
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
	host, portStr, err := net.SplitHostPort(target)
	if err != nil {
		return err
	}

	port, err := strconv.Atoi(portStr)
	if err != nil {
		return errors.New("proxy: failed to parse port number: " + portStr)
	}
	if port < 1 || port > 0xffff {
		return errors.New("proxy: port number out of range: " + portStr)
	}

	// the size here is just an estimate
	buf := make([]byte, 0, 6+len(host))

	// Greeting: advertise the authentication methods we support
	// (RFC 1928, section 3). Username/password is only offered when the
	// credentials fit the protocol's one-byte length fields.
	buf = append(buf, proxy_socks5Version)
	if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
		buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
	} else {
		buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
	}

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}
	if buf[0] != 5 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
	}
	if buf[1] == 0xff {
		// 0xff means "no acceptable methods" (RFC 1928).
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
	}

	// See RFC 1929
	if buf[1] == proxy_socks5AuthPassword {
		buf = buf[:0]
		buf = append(buf, 1 /* password protocol version */)
		buf = append(buf, uint8(len(s.user)))
		buf = append(buf, s.user...)
		buf = append(buf, uint8(len(s.password)))
		buf = append(buf, s.password...)

		if _, err := conn.Write(buf); err != nil {
			return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		if _, err := io.ReadFull(conn, buf[:2]); err != nil {
			return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}

		if buf[1] != 0 {
			return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
		}
	}

	// CONNECT request (RFC 1928, section 4): version, command, reserved
	// byte, destination address, then the port in network byte order.
	buf = buf[:0]
	buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)

	if ip := net.ParseIP(host); ip != nil {
		if ip4 := ip.To4(); ip4 != nil {
			buf = append(buf, proxy_socks5IP4)
			ip = ip4
		} else {
			buf = append(buf, proxy_socks5IP6)
		}
		buf = append(buf, ip...)
	} else {
		// Not a literal IP: send the host name and let the proxy resolve it.
		if len(host) > 255 {
			return errors.New("proxy: destination host name too long: " + host)
		}
		buf = append(buf, proxy_socks5Domain)
		buf = append(buf, byte(len(host)))
		buf = append(buf, host...)
	}
	buf = append(buf, byte(port>>8), byte(port))

	if _, err := conn.Write(buf); err != nil {
		return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Reply header: version, reply code, reserved, address type.
	if _, err := io.ReadFull(conn, buf[:4]); err != nil {
		return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	failure := "unknown error"
	if int(buf[1]) < len(proxy_socks5Errors) {
		failure = proxy_socks5Errors[buf[1]]
	}

	if len(failure) > 0 {
		return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
	}

	// Consume and discard the bound address in the reply; its length
	// depends on the address-type byte.
	bytesToDiscard := 0
	switch buf[3] {
	case proxy_socks5IP4:
		bytesToDiscard = net.IPv4len
	case proxy_socks5IP6:
		bytesToDiscard = net.IPv6len
	case proxy_socks5Domain:
		// Domain addresses carry a one-byte length prefix.
		_, err := io.ReadFull(conn, buf[:1])
		if err != nil {
			return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
		}
		bytesToDiscard = int(buf[0])
	default:
		return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
	}

	if cap(buf) < bytesToDiscard {
		buf = make([]byte, bytesToDiscard)
	} else {
		buf = buf[:bytesToDiscard]
	}
	if _, err := io.ReadFull(conn, buf); err != nil {
		return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	// Also need to discard the port number
	if _, err := io.ReadFull(conn, buf[:2]); err != nil {
		return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
	}

	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/tls_handshake_116.go
vendor/github.com/gorilla/websocket/tls_handshake_116.go
//go:build !go1.17 // +build !go1.17 package websocket import ( "context" "crypto/tls" ) func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { if err := tlsConn.Handshake(); err != nil { return err } if !cfg.InsecureSkipVerify { if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { return err } } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/server.go
vendor/github.com/gorilla/websocket/server.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bufio"
	"errors"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"
)

// HandshakeError describes an error with the handshake from the peer.
type HandshakeError struct {
	message string
}

func (e HandshakeError) Error() string { return e.message }

// Upgrader specifies parameters for upgrading an HTTP connection to a
// WebSocket connection.
//
// It is safe to call Upgrader's methods concurrently.
type Upgrader struct {
	// HandshakeTimeout specifies the duration for the handshake to complete.
	HandshakeTimeout time.Duration

	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
	// size is zero, then buffers allocated by the HTTP server are used. The
	// I/O buffer sizes do not limit the size of the messages that can be sent
	// or received.
	ReadBufferSize, WriteBufferSize int

	// WriteBufferPool is a pool of buffers for write operations. If the value
	// is not set, then write buffers are allocated to the connection for the
	// lifetime of the connection.
	//
	// A pool is most useful when the application has a modest volume of writes
	// across a large number of connections.
	//
	// Applications should use a single pool for each unique value of
	// WriteBufferSize.
	WriteBufferPool BufferPool

	// Subprotocols specifies the server's supported protocols in order of
	// preference. If this field is not nil, then the Upgrade method negotiates a
	// subprotocol by selecting the first match in this list with a protocol
	// requested by the client. If there's no match, then no protocol is
	// negotiated (the Sec-Websocket-Protocol header is not included in the
	// handshake response).
	Subprotocols []string

	// Error specifies the function for generating HTTP error responses. If Error
	// is nil, then http.Error is used to generate the HTTP response.
	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)

	// CheckOrigin returns true if the request Origin header is acceptable. If
	// CheckOrigin is nil, then a safe default is used: return false if the
	// Origin request header is present and the origin host is not equal to
	// request Host header.
	//
	// A CheckOrigin function should carefully validate the request origin to
	// prevent cross-site request forgery.
	CheckOrigin func(r *http.Request) bool

	// EnableCompression specify if the server should attempt to negotiate per
	// message compression (RFC 7692). Setting this value to true does not
	// guarantee that compression will be supported. Currently only "no context
	// takeover" modes are supported.
	EnableCompression bool
}

// returnError fails the handshake by invoking the custom Error hook or, when
// none is set, replying with http.Error. It always returns a HandshakeError
// wrapping reason so callers can propagate it.
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
	err := HandshakeError{reason}
	if u.Error != nil {
		u.Error(w, r, status, err)
	} else {
		w.Header().Set("Sec-Websocket-Version", "13")
		http.Error(w, http.StatusText(status), status)
	}
	return nil, err
}

// checkSameOrigin returns true if the origin is not set or is equal to the request host.
func checkSameOrigin(r *http.Request) bool {
	origin := r.Header["Origin"]
	if len(origin) == 0 {
		return true
	}
	u, err := url.Parse(origin[0])
	if err != nil {
		return false
	}
	return equalASCIIFold(u.Host, r.Host)
}

// selectSubprotocol picks the subprotocol for the connection: the first
// server-preferred protocol also requested by the client, or — when the
// server has no preference list — the value already present in the prepared
// response header.
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
	if u.Subprotocols != nil {
		clientProtocols := Subprotocols(r)
		for _, serverProtocol := range u.Subprotocols {
			for _, clientProtocol := range clientProtocols {
				if clientProtocol == serverProtocol {
					return clientProtocol
				}
			}
		}
	} else if responseHeader != nil {
		return responseHeader.Get("Sec-Websocket-Protocol")
	}
	return ""
}

// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// The responseHeader is included in the response to the client's upgrade
// request.
Use the responseHeader to specify cookies (Set-Cookie). To specify
// subprotocols supported by the server, set Upgrader.Subprotocols directly.
//
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
// response.
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
	const badHandshake = "websocket: the client is not using the websocket protocol: "

	// Validate the client's opening handshake (RFC 6455, section 4.2.1).
	if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
	}

	if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
		return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
	}

	if r.Method != http.MethodGet {
		return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
	}

	if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
		return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
	}

	if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
		return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
	}

	checkOrigin := u.CheckOrigin
	if checkOrigin == nil {
		checkOrigin = checkSameOrigin
	}
	if !checkOrigin(r) {
		return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
	}

	challengeKey := r.Header.Get("Sec-Websocket-Key")
	if !isValidChallengeKey(challengeKey) {
		return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length")
	}

	subprotocol := u.selectSubprotocol(r, responseHeader)

	// Negotiate PMCE
	var compress bool
	if u.EnableCompression {
		for _, ext := range parseExtensions(r.Header) {
			if ext[""] != "permessage-deflate" {
				continue
			}
			compress = true
			break
		}
	}

	// Take over the raw network connection from the HTTP server.
	h, ok := w.(http.Hijacker)
	if !ok {
		return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
	}
	var brw *bufio.ReadWriter
	netConn, brw, err := h.Hijack()
	if err != nil {
		return u.returnError(w, r, http.StatusInternalServerError, err.Error())
	}

	if brw.Reader.Buffered() > 0 {
		netConn.Close()
		return nil, errors.New("websocket: client sent data before handshake is complete")
	}

	var br *bufio.Reader
	if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
		// Reuse hijacked buffered reader as connection reader.
		br = brw.Reader
	}

	buf := bufioWriterBuffer(netConn, brw.Writer)

	var writeBuf []byte
	if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
		// Reuse hijacked write buffer as connection buffer.
		writeBuf = buf
	}

	c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
	c.subprotocol = subprotocol

	if compress {
		c.newCompressionWriter = compressNoContextTakeover
		c.newDecompressionReader = decompressNoContextTakeover
	}

	// Use larger of hijacked buffer and connection write buffer for header.
	p := buf
	if len(c.writeBuf) > len(p) {
		p = c.writeBuf
	}
	p = p[:0]

	// Build the 101 Switching Protocols response by hand into p.
	p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
	p = append(p, computeAcceptKey(challengeKey)...)
	p = append(p, "\r\n"...)
	if c.subprotocol != "" {
		p = append(p, "Sec-WebSocket-Protocol: "...)
		p = append(p, c.subprotocol...)
		p = append(p, "\r\n"...)
	}
	if compress {
		p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
	}
	for k, vs := range responseHeader {
		if k == "Sec-Websocket-Protocol" {
			continue
		}
		for _, v := range vs {
			p = append(p, k...)
			p = append(p, ": "...)
			for i := 0; i < len(v); i++ {
				b := v[i]
				if b <= 31 {
					// prevent response splitting.
					b = ' '
				}
				p = append(p, b)
			}
			p = append(p, "\r\n"...)
		}
	}
	p = append(p, "\r\n"...)

	// Clear deadlines set by HTTP server.
	netConn.SetDeadline(time.Time{})

	if u.HandshakeTimeout > 0 {
		netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
	}
	if _, err = netConn.Write(p); err != nil {
		netConn.Close()
		return nil, err
	}
	if u.HandshakeTimeout > 0 {
		netConn.SetWriteDeadline(time.Time{})
	}

	return c, nil
}

// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
//
// Deprecated: Use websocket.Upgrader instead.
//
// Upgrade does not perform origin checking. The application is responsible for
// checking the Origin header before calling Upgrade. An example implementation
// of the same origin policy check is:
//
//	if req.Header.Get("Origin") != "http://"+req.Host {
//		http.Error(w, "Origin not allowed", http.StatusForbidden)
//		return
//	}
//
// If the endpoint supports subprotocols, then the application is responsible
// for negotiating the protocol used on the connection. Use the Subprotocols()
// function to get the subprotocols requested by the client. Use the
// Sec-Websocket-Protocol response header to specify the subprotocol selected
// by the application.
//
// The responseHeader is included in the response to the client's upgrade
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
// negotiated subprotocol (Sec-Websocket-Protocol).
//
// The connection buffers IO to the underlying network connection. The
// readBufSize and writeBufSize parameters specify the size of the buffers to
// use. Messages can be larger than the buffers.
//
// If the request is not a valid WebSocket handshake, then Upgrade returns an
// error of type HandshakeError. Applications should handle this error by
// replying to the client with an HTTP error response.
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
		// don't return errors to maintain backwards compatibility
	}
	u.CheckOrigin = func(r *http.Request) bool {
		// allow all connections by default
		return true
	}
	return u.Upgrade(w, r, responseHeader)
}

// Subprotocols returns the subprotocols requested by the client in the
// Sec-Websocket-Protocol header.
func Subprotocols(r *http.Request) []string {
	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
	if h == "" {
		return nil
	}
	protocols := strings.Split(h, ",")
	for i := range protocols {
		protocols[i] = strings.TrimSpace(protocols[i])
	}
	return protocols
}

// IsWebSocketUpgrade returns true if the client requested upgrade to the
// WebSocket protocol.
func IsWebSocketUpgrade(r *http.Request) bool {
	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
		tokenListContainsValue(r.Header, "Upgrade", "websocket")
}

// bufioReaderSize returns the size of a bufio.Reader.
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
	// This code assumes that peek on a reset reader returns
	// bufio.Reader.buf[:0].
	// TODO: Use bufio.Reader.Size() after Go 1.10
	br.Reset(originalReader)
	if p, err := br.Peek(0); err == nil {
		return cap(p)
	}
	return 0
}

// writeHook is an io.Writer that records the last slice passed to it via
// io.Writer.Write.
type writeHook struct {
	p []byte
}

// Write remembers p and reports it as fully written without copying it.
func (wh *writeHook) Write(p []byte) (int, error) {
	wh.p = p
	return len(p), nil
}

// bufioWriterBuffer grabs the buffer from a bufio.Writer.
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
	// This code assumes that bufio.Writer.buf[:1] is passed to the
	// bufio.Writer's underlying writer.
	var wh writeHook
	bw.Reset(&wh)
	bw.WriteByte(0)
	bw.Flush()

	// Restore the writer's original destination before returning.
	bw.Reset(originalWriter)

	return wh.p[:cap(wh.p)]
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/doc.go
vendor/github.com/gorilla/websocket/doc.go
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package websocket implements the WebSocket protocol defined in RFC 6455. // // Overview // // The Conn type represents a WebSocket connection. A server application calls // the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: // // var upgrader = websocket.Upgrader{ // ReadBufferSize: 1024, // WriteBufferSize: 1024, // } // // func handler(w http.ResponseWriter, r *http.Request) { // conn, err := upgrader.Upgrade(w, r, nil) // if err != nil { // log.Println(err) // return // } // ... Use conn to send and receive messages. // } // // Call the connection's WriteMessage and ReadMessage methods to send and // receive messages as a slice of bytes. This snippet of code shows how to echo // messages using these methods: // // for { // messageType, p, err := conn.ReadMessage() // if err != nil { // log.Println(err) // return // } // if err := conn.WriteMessage(messageType, p); err != nil { // log.Println(err) // return // } // } // // In above snippet of code, p is a []byte and messageType is an int with value // websocket.BinaryMessage or websocket.TextMessage. // // An application can also send and receive messages using the io.WriteCloser // and io.Reader interfaces. To send a message, call the connection NextWriter // method to get an io.WriteCloser, write the message to the writer and close // the writer when done. To receive a message, call the connection NextReader // method to get an io.Reader and read until io.EOF is returned. 
This snippet // shows how to echo messages using the NextWriter and NextReader methods: // // for { // messageType, r, err := conn.NextReader() // if err != nil { // return // } // w, err := conn.NextWriter(messageType) // if err != nil { // return err // } // if _, err := io.Copy(w, r); err != nil { // return err // } // if err := w.Close(); err != nil { // return err // } // } // // Data Messages // // The WebSocket protocol distinguishes between text and binary data messages. // Text messages are interpreted as UTF-8 encoded text. The interpretation of // binary messages is left to the application. // // This package uses the TextMessage and BinaryMessage integer constants to // identify the two data message types. The ReadMessage and NextReader methods // return the type of the received message. The messageType argument to the // WriteMessage and NextWriter methods specifies the type of a sent message. // // It is the application's responsibility to ensure that text messages are // valid UTF-8 encoded text. // // Control Messages // // The WebSocket protocol defines three types of control messages: close, ping // and pong. Call the connection WriteControl, WriteMessage or NextWriter // methods to send a control message to the peer. // // Connections handle received close messages by calling the handler function // set with the SetCloseHandler method and by returning a *CloseError from the // NextReader, ReadMessage or the message Read method. The default close // handler sends a close message to the peer. // // Connections handle received ping messages by calling the handler function // set with the SetPingHandler method. The default ping handler sends a pong // message to the peer. // // Connections handle received pong messages by calling the handler function // set with the SetPongHandler method. The default pong handler does nothing. 
// If an application sends ping messages, then the application should set a // pong handler to receive the corresponding pong. // // The control message handler functions are called from the NextReader, // ReadMessage and message reader Read methods. The default close and ping // handlers can block these methods for a short time when the handler writes to // the connection. // // The application must read the connection to process close, ping and pong // messages sent from the peer. If the application is not otherwise interested // in messages from the peer, then the application should start a goroutine to // read and discard messages from the peer. A simple example is: // // func readLoop(c *websocket.Conn) { // for { // if _, _, err := c.NextReader(); err != nil { // c.Close() // break // } // } // } // // Concurrency // // Connections support one concurrent reader and one concurrent writer. // // Applications are responsible for ensuring that no more than one goroutine // calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, // WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and // that no more than one goroutine calls the read methods (NextReader, // SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) // concurrently. // // The Close and WriteControl methods can be called concurrently with all other // methods. // // Origin Considerations // // Web browsers allow Javascript applications to open a WebSocket connection to // any host. It's up to the server to enforce an origin policy using the Origin // request header sent by the browser. // // The Upgrader calls the function specified in the CheckOrigin field to check // the origin. If the CheckOrigin function returns false, then the Upgrade // method fails the WebSocket handshake with HTTP status 403. 
// // If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail // the handshake if the Origin request header is present and the Origin host is // not equal to the Host request header. // // The deprecated package-level Upgrade function does not perform origin // checking. The application is responsible for checking the Origin header // before calling the Upgrade function. // // Buffers // // Connections buffer network input and output to reduce the number // of system calls when reading or writing messages. // // Write buffers are also used for constructing WebSocket frames. See RFC 6455, // Section 5 for a discussion of message framing. A WebSocket frame header is // written to the network each time a write buffer is flushed to the network. // Decreasing the size of the write buffer can increase the amount of framing // overhead on the connection. // // The buffer sizes in bytes are specified by the ReadBufferSize and // WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default // size of 4096 when a buffer size field is set to zero. The Upgrader reuses // buffers created by the HTTP server when a buffer size field is set to zero. // The HTTP server buffers have a size of 4096 at the time of this writing. // // The buffer sizes do not limit the size of a message that can be read or // written by a connection. // // Buffers are held for the lifetime of the connection by default. If the // Dialer or Upgrader WriteBufferPool field is set, then a connection holds the // write buffer only when writing a message. // // Applications should tune the buffer sizes to balance memory use and // performance. Increasing the buffer size uses more memory, but can reduce the // number of system calls to read or write the network. In the case of writing, // increasing the buffer size can reduce the number of frame headers written to // the network. 
//
// Some guidelines for setting buffer parameters are:
//
// Limit the buffer sizes to the maximum expected message size. Buffers larger
// than the largest message do not provide any benefit.
//
// Depending on the distribution of message sizes, setting the buffer size to
// a value less than the maximum expected message size can greatly reduce memory
// use with a small impact on performance. Here's an example: If 99% of the
// messages are smaller than 256 bytes and the maximum message size is 512
// bytes, then a buffer size of 256 bytes will result in 1.01 times more system
// calls than a buffer size of 512 bytes. The memory savings is 50%.
//
// A write buffer pool is useful when the application has a modest number of
// writes over a large number of connections. When buffers are pooled, a larger
// buffer size has a reduced impact on total memory use and has the benefit of
// reducing system calls and frame overhead.
//
// Compression EXPERIMENTAL
//
// Per message compression extensions (RFC 7692) are experimentally supported
// by this package in a limited capacity. Setting the EnableCompression option
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
// support.
//
//	var upgrader = websocket.Upgrader{
//		EnableCompression: true,
//	}
//
// If compression was successfully negotiated with the connection's peer, any
// message received in compressed form will be automatically decompressed.
// All Read methods will return uncompressed bytes.
//
// Per message compression of messages written to a connection can be enabled
// or disabled by calling the corresponding Conn method:
//
//	conn.EnableWriteCompression(false)
//
// Currently this package does not support compression with "context takeover".
// This means that messages must be compressed and decompressed in isolation,
// without retaining sliding window or dictionary state across messages. For
// more details refer to RFC 7692.
// // Use of compression is experimental and may result in decreased performance. package websocket
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/prepared.go
vendor/github.com/gorilla/websocket/prepared.go
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bytes"
	"net"
	"sync"
	"time"
)

// PreparedMessage caches on the wire representations of a message payload.
// Use PreparedMessage to efficiently send a message payload to multiple
// connections. PreparedMessage is especially useful when compression is used
// because the CPU and memory expensive compression operation can be executed
// once for a given set of compression options.
type PreparedMessage struct {
	messageType int
	data        []byte
	mu          sync.Mutex
	frames      map[prepareKey]*preparedFrame
}

// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
type prepareKey struct {
	isServer         bool
	compress         bool
	compressionLevel int
}

// preparedFrame contains data in wire representation.
type preparedFrame struct {
	once sync.Once
	data []byte
}

// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to connection using WritePreparedMessage method. Valid wire
// representation will be calculated lazily only once for a set of current
// connection options.
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
	pm := &PreparedMessage{
		messageType: messageType,
		frames:      make(map[prepareKey]*preparedFrame),
		data:        data,
	}

	// Prepare a plain server frame.
	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
	if err != nil {
		return nil, err
	}

	// To protect against caller modifying the data argument, remember the data
	// copied to the plain server frame.
	pm.data = frameData[len(frameData)-len(data):]
	return pm, nil
}

// frame returns the cached wire representation for the given options,
// constructing it exactly once per key (guarded by the frame's sync.Once).
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
	pm.mu.Lock()
	frame, ok := pm.frames[key]
	if !ok {
		frame = &preparedFrame{}
		pm.frames[key] = frame
	}
	pm.mu.Unlock()

	var err error
	frame.once.Do(func() {
		// Prepare a frame using a 'fake' connection.
		// TODO: Refactor code in conn.go to allow more direct construction of
		// the frame.
		mu := make(chan struct{}, 1)
		mu <- struct{}{}
		var nc prepareConn
		c := &Conn{
			conn:                   &nc,
			mu:                     mu,
			isServer:               key.isServer,
			compressionLevel:       key.compressionLevel,
			enableWriteCompression: true,
			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
		}
		if key.compress {
			c.newCompressionWriter = compressNoContextTakeover
		}
		err = c.WriteMessage(pm.messageType, pm.data)
		frame.data = nc.buf.Bytes()
	})
	return pm.messageType, frame.data, err
}

// prepareConn is a minimal net.Conn stand-in that captures all writes in a
// buffer; the embedded net.Conn is never used beyond satisfying the interface.
type prepareConn struct {
	buf bytes.Buffer
	net.Conn
}

func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gorilla/websocket/compression.go
vendor/github.com/gorilla/websocket/compression.go
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package websocket import ( "compress/flate" "errors" "io" "strings" "sync" ) const ( minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 maxCompressionLevel = flate.BestCompression defaultCompressionLevel = 1 ) var ( flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool flateReaderPool = sync.Pool{New: func() interface{} { return flate.NewReader(nil) }} ) func decompressNoContextTakeover(r io.Reader) io.ReadCloser { const tail = // Add four bytes as specified in RFC "\x00\x00\xff\xff" + // Add final block to squelch unexpected EOF error from flate reader. "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) return &flateReadWrapper{fr} } func isValidCompressionLevel(level int) bool { return minCompressionLevel <= level && level <= maxCompressionLevel } func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { p := &flateWriterPools[level-minCompressionLevel] tw := &truncWriter{w: w} fw, _ := p.Get().(*flate.Writer) if fw == nil { fw, _ = flate.NewWriter(tw, level) } else { fw.Reset(tw) } return &flateWriteWrapper{fw: fw, tw: tw, p: p} } // truncWriter is an io.Writer that writes all but the last four bytes of the // stream to another io.Writer. type truncWriter struct { w io.WriteCloser n int p [4]byte } func (w *truncWriter) Write(p []byte) (int, error) { n := 0 // fill buffer first for simplicity. 
if w.n < len(w.p) { n = copy(w.p[w.n:], p) p = p[n:] w.n += n if len(p) == 0 { return n, nil } } m := len(p) if m > len(w.p) { m = len(w.p) } if nn, err := w.w.Write(w.p[:m]); err != nil { return n + nn, err } copy(w.p[:], w.p[m:]) copy(w.p[len(w.p)-m:], p[len(p)-m:]) nn, err := w.w.Write(p[:len(p)-m]) return n + nn, err } type flateWriteWrapper struct { fw *flate.Writer tw *truncWriter p *sync.Pool } func (w *flateWriteWrapper) Write(p []byte) (int, error) { if w.fw == nil { return 0, errWriteClosed } return w.fw.Write(p) } func (w *flateWriteWrapper) Close() error { if w.fw == nil { return errWriteClosed } err1 := w.fw.Flush() w.p.Put(w.fw) w.fw = nil if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { return errors.New("websocket: internal error, unexpected bytes at end of flate stream") } err2 := w.tw.w.Close() if err1 != nil { return err1 } return err2 } type flateReadWrapper struct { fr io.ReadCloser } func (r *flateReadWrapper) Read(p []byte) (int, error) { if r.fr == nil { return 0, io.ErrClosedPipe } n, err := r.fr.Read(p) if err == io.EOF { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. r.Close() } return n, err } func (r *flateReadWrapper) Close() error { if r.fr == nil { return io.ErrClosedPipe } err := r.fr.Close() flateReaderPool.Put(r.fr) r.fr = nil return err }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/logr.go
vendor/github.com/go-logr/logr/logr.go
/* Copyright 2019 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This design derives from Dave Cheney's blog: // http://dave.cheney.net/2015/11/05/lets-talk-about-logging // Package logr defines a general-purpose logging API and abstract interfaces // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // // # Usage // // Logging is done using a Logger instance. Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main // methods of Logger are Info() and Error(). Arguments to Info() and Error() // are key/value pairs rather than printf-style formatted strings, emphasizing // "structured logging". // // With Go's standard log package, we might write: // // log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: // // logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: // // log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: // // logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional // information (such as stack traces) on calls to Error(). Error() messages are // always logged, regardless of the current verbosity. 
If there is no error // instance available, passing nil is valid. // // # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. // The higher the V-level of a log line, the less critical it is considered. // Log-lines with V-levels that are not enabled (as per the LogSink) will not // be written. Level V(0) is the default, and logger.V(0).Info() has the same // meaning as logger.Info(). Negative V-levels have the same meaning as V(0). // Error messages do not have a verbosity level and are always logged. // // Where we might have written: // // if flVerbose >= 2 { // log.Printf("an unusual thing happened") // } // // We can write: // // logger.V(2).Info("an unusual thing happened") // // # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // // logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() // will accumulate name "segments". These name segments will be joined in some // way by the LogSink implementation. It is strongly recommended that name // segments contain simple identifiers (letters, digits, and hyphen), and do // not contain characters that could muddle the log output or confuse the // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // // # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. 
For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: // // log.Printf("decided to set field foo to value %q for object %s/%s", // targetValue, object.Namespace, object.Name) // // With logr we'd write: // // // Elsewhere: set up the logger to log the object name. // obj.logger = mainLogger.WithValues( // "name", obj.name, "namespace", obj.namespace) // // // later on... // obj.logger.Info("setting foo", "value", targetValue) // // # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some // things to consider. // // The log message consists of a constant message attached to the log line. // This should generally be a simple description of what's occurring, and should // never be a format string. Variable information can then be attached using // named values. // // Keys are arbitrary strings, but should generally be constant values. Values // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // // Logger instances are meant to be passed around by value. Code that receives // such a value can call its methods without having to check whether the // instance is ready for use. // // The zero logger (= Logger{}) is identical to Discard() and discards all log // entries. Code that receives a Logger by value can simply call it, the methods // will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. 
// // # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: // - be human-readable and meaningful (not auto-generated or simple ordinals) // - be constant (not dependent on input data) // - contain only printable characters // - not contain whitespace or punctuation // - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to // output JSON data or will store data for later database (e.g. SQL) queries. // // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: // - "caller": the calling information (file/line) of a particular log line // - "error": the underlying error value in the `Error` method // - "level": the log level // - "logger": the name of the associated logger // - "msg": the log message // - "stacktrace": the stack trace associated with a particular log line or // error (often from the `Error` message) // - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // // # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: // // // Underlier exposes access to the underlying logging implementation. // // Since callers only have a logr.Logger, they have to know which // // implementation is in use, so this interface is less of an abstraction // // and more of way to test type conversion. 
// type Underlier interface { // GetUnderlying() <underlying-type> // } // // Logger grants access to the sink to enable type assertions like this: // // func DoSomethingWithImpl(log logr.Logger) { // if underlier, ok := log.GetSink().(impl.Underlier); ok { // implLogger := underlier.GetUnderlying() // ... // } // } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: // // // WithFooBar changes the foobar parameter in the log sink and returns a // // new logger with that modified sink. It does nothing for loggers where // // the sink doesn't support that parameter. // func WithFoobar(log logr.Logger, foobar int) logr.Logger { // if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { // log = log.WithSink(foobarLogSink.WithFooBar(foobar)) // } // return log // } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and // unexported fields in Logger get lost. // // Beware that the same LogSink instance may be shared by different logger // instances. Calling functions that modify the LogSink will affect all of // those. package logr // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) if sink != nil { sink.Init(runtimeInfo) } return logger } // setSink stores the sink and updates any related fields. It mutates the // logger and thus is only safe to use for loggers that are not currently being // used concurrently. func (l *Logger) setSink(sink LogSink) { l.sink = sink } // GetSink returns the stored sink. func (l Logger) GetSink() LogSink { return l.sink } // WithSink returns a copy of the logger with the new sink. 
func (l Logger) WithSink(sink LogSink) Logger { l.setSink(sink) return l } // Logger is an interface to an abstract logging implementation. This is a // concrete type for performance reasons, but all the real work is passed on to // a LogSink. Implementations of LogSink should provide their own constructors // that return Logger, not LogSink. // // The underlying sink can be accessed through GetSink and be modified through // WithSink. This enables the implementation of custom extensions (see "Break // Glass" in the package documentation). Normally the sink should be used only // indirectly. type Logger struct { sink LogSink level int } // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { // Some implementations of LogSink look at the caller in Enabled (e.g. // different verbosity levels per package or file), but we only pass one // CallDepth in (via Init). This means that all calls from Logger to the // LogSink's Enabled, Info, and Error methods must have the same number of // frames. In other words, Logger methods can't call other Logger methods // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. // // The msg argument should be used to add some constant description to the log // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } l.sink.Info(l.level, msg, keysAndValues...) 
} } // Error logs an error, with the given message and key/value pairs as context. // It functions similarly to Info, but may have unique behavior, and should be // preferred for logging errors (see the package documentations for more // information). The log message will always be emitted, regardless of // verbosity level. // // The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } l.sink.Error(err, msg, keysAndValues...) } // V returns a new Logger instance for a specific verbosity level, relative to // this Logger. In other words, V-levels are additive. A higher verbosity // level means a log message is less important. Negative V-levels are treated // as 0. func (l Logger) V(level int) Logger { if l.sink == nil { return l } if level < 0 { level = 0 } l.level += level return l } // GetV returns the verbosity level of the logger. If the logger's LogSink is // nil as in the Discard logger, this will always return 0. func (l Logger) GetV() int { // 0 if l.sink nil because of the if check in V above. return l.level } // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } l.setSink(l.sink.WithValues(keysAndValues...)) return l } // WithName returns a new Logger instance with the specified name element added // to the Logger's name. Successive calls with WithName append additional // suffixes to the Logger's name. 
It's strongly recommended that name segments // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { if l.sink == nil { return l } l.setSink(l.sink.WithName(name)) return l } // WithCallDepth returns a Logger instance that offsets the call stack by the // specified number of frames when logging call site information, if possible. // This is useful for users who have helper functions between the "real" call // site and the actual calls to Logger methods. If depth is 0 the attribution // should be to the direct caller of this function. If depth is 1 the // attribution should skip 1 call frame, and so on. Successive calls to this // are additive. // // If the underlying log implementation supports a WithCallDepth(int) method, // it will be called and the result returned. If the implementation does not // support CallDepthLogSink, the original Logger will be returned. // // To skip one level, WithCallStackHelper() should be used instead of // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { if l.sink == nil { return l } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } return l } // WithCallStackHelper returns a new Logger instance that skips the direct // caller when logging call site information, if possible. This is useful for // users who have helper functions between the "real" call site and the actual // calls to Logger methods and want to support loggers which depend on marking // each individual helper function, like loggers based on testing.T. // // In addition to using that new logger instance, callers also must call the // returned function. // // If the underlying log implementation supports a WithCallDepth(int) method, // WithCallDepth(1) will be called to produce a new logger. 
If it supports a // WithCallStackHelper() method, that will be also called. If the // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { if l.sink == nil { return func() {}, l } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { helper = withHelper.GetCallStackHelper() } else { helper = func() {} } return helper, l } // IsZero returns true if this logger is an uninitialized zero value func (l Logger) IsZero() bool { return l.sink == nil } // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. type RuntimeInfo struct { // CallDepth is the number of call frames the logr library adds between the // end-user and the LogSink. LogSink implementations which choose to print // the original logging site (e.g. file & line) should climb this many // additional frames to find it. CallDepth int } // runtimeInfo is a static global. It must not be changed at run time. var runtimeInfo = RuntimeInfo{ CallDepth: 1, } // LogSink represents a logging implementation. End-users will generally not // interact with this type. type LogSink interface { // Init receives optional information about the logr library for LogSink // implementations that need it. Init(info RuntimeInfo) // Enabled tests whether this LogSink is enabled at the specified V-level. // For example, commandline flags might be used to set the logging // verbosity and disable some info logs. Enabled(level int) bool // Info logs a non-error message with the given key/value pairs as context. // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. 
Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. WithName(name string) LogSink } // CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. // Implementations that log information about the call site (such as file, // function, or line) would otherwise log information about the intermediate // helper functions. // // This is an optional interface and implementations are not required to // support it. type CallDepthLogSink interface { // WithCallDepth returns a LogSink that will offset the call // stack by the specified number of frames when logging call // site information. // // If depth is 0, the LogSink should skip exactly the number // of call frames defined in RuntimeInfo.CallDepth when Info // or Error are called, i.e. the attribution should be to the // direct caller of Logger.Info or Logger.Error. // // If depth is 1 the attribution should skip 1 call frame, and so on. // Successive calls to this are additive. WithCallDepth(depth int) LogSink } // CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. 
// // This is useful for users who have helper functions between the // "real" call site and the actual calls to Logger methods. // Implementations that log information about the call site (such as // file, function, or line) would otherwise log information about the // intermediate helper functions. // // This is an optional interface and implementations are not required // to support it. Implementations that choose to support this must not // simply implement it as WithCallDepth(1), because // Logger.WithCallStackHelper will call both methods if they are // present. This should only be implemented for LogSinks that actually // need it, as with testing.T. type CallStackHelperLogSink interface { // GetCallStackHelper returns a function that must be called // to mark the direct caller as helper function when logging // call site information. GetCallStackHelper() func() } // Marshaler is an optional interface that logged values may choose to // implement. Loggers with structured output, such as JSON, should // log the object return by the MarshalLog method instead of the // original value. type Marshaler interface { // MarshalLog can be used to: // - ensure that structs are not logged as strings when the original // value has a String method: return a different type without a // String method // - select which fields of a complex type should get logged: // return a simpler struct with fewer fields // - log unexported fields: return a different struct // with exported fields // // It may return any value of any type. MarshalLog() any }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/sloghandler.go
vendor/github.com/go-logr/logr/sloghandler.go
//go:build go1.21 // +build go1.21 /* Copyright 2023 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "log/slog" ) type slogHandler struct { // May be nil, in which case all logs get discarded. sink LogSink // Non-nil if sink is non-nil and implements SlogSink. slogSink SlogSink // groupPrefix collects values from WithGroup calls. It gets added as // prefix to value keys when handling a log record. groupPrefix string // levelBias can be set when constructing the handler to influence the // slog.Level of log records. A positive levelBias reduces the // slog.Level value. slog has no API to influence this value after the // handler got created, so it can only be set indirectly through // Logger.V. levelBias slog.Level } var _ slog.Handler = &slogHandler{} // groupSeparator is used to concatenate WithGroup names and attribute keys. const groupSeparator = "." // GetLevel is used for black box unit testing. func (l *slogHandler) GetLevel() slog.Level { return l.levelBias } func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) } func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { if l.slogSink != nil { // Only adjust verbosity level of log entries < slog.LevelError. 
if record.Level < slog.LevelError { record.Level -= l.levelBias } return l.slogSink.Handle(ctx, record) } // No need to check for nil sink here because Handle will only be called // when Enabled returned true. kvList := make([]any, 0, 2*record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { kvList = attrToKVs(attr, l.groupPrefix, kvList) return true }) if record.Level >= slog.LevelError { l.sinkWithCallDepth().Error(nil, record.Message, kvList...) } else { level := l.levelFromSlog(record.Level) l.sinkWithCallDepth().Info(level, record.Message, kvList...) } return nil } // sinkWithCallDepth adjusts the stack unwinding so that when Error or Info // are called by Handle, code in slog gets skipped. // // This offset currently (Go 1.21.0) works for calls through // slog.New(ToSlogHandler(...)). There's no guarantee that the call // chain won't change. Wrapping the handler will also break unwinding. It's // still better than not adjusting at all.... // // This cannot be done when constructing the handler because FromSlogHandler needs // access to the original sink without this adjustment. A second copy would // work, but then WithAttrs would have to be called for both of them. func (l *slogHandler) sinkWithCallDepth() LogSink { if sink, ok := l.sink.(CallDepthLogSink); ok { return sink.WithCallDepth(2) } return l.sink } func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { if l.sink == nil || len(attrs) == 0 { return l } clone := *l if l.slogSink != nil { clone.slogSink = l.slogSink.WithAttrs(attrs) clone.sink = clone.slogSink } else { kvList := make([]any, 0, 2*len(attrs)) for _, attr := range attrs { kvList = attrToKVs(attr, l.groupPrefix, kvList) } clone.sink = l.sink.WithValues(kvList...) 
} return &clone } func (l *slogHandler) WithGroup(name string) slog.Handler { if l.sink == nil { return l } if name == "" { // slog says to inline empty groups return l } clone := *l if l.slogSink != nil { clone.slogSink = l.slogSink.WithGroup(name) clone.sink = clone.slogSink } else { clone.groupPrefix = addPrefix(clone.groupPrefix, name) } return &clone } // attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups // and other details of slog. func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { attrVal := attr.Value.Resolve() if attrVal.Kind() == slog.KindGroup { groupVal := attrVal.Group() grpKVs := make([]any, 0, 2*len(groupVal)) prefix := groupPrefix if attr.Key != "" { prefix = addPrefix(groupPrefix, attr.Key) } for _, attr := range groupVal { grpKVs = attrToKVs(attr, prefix, grpKVs) } kvList = append(kvList, grpKVs...) } else if attr.Key != "" { kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) } return kvList } func addPrefix(prefix, name string) string { if prefix == "" { return name } if name == "" { return prefix } return prefix + groupSeparator + name } // levelFromSlog adjusts the level by the logger's verbosity and negates it. // It ensures that the result is >= 0. This is necessary because the result is // passed to a LogSink and that API did not historically document whether // levels could be negative or what that meant. // // Some example usage: // // logrV0 := getMyLogger() // logrV2 := logrV0.V(2) // slogV2 := slog.New(logr.ToSlogHandler(logrV2)) // slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) // slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) // slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) func (l *slogHandler) levelFromSlog(level slog.Level) int { result := -level result += l.levelBias // in case the original Logger had a V level if result < 0 { result = 0 // because LogSink doesn't expect negative V levels } return int(result) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/discard.go
vendor/github.com/go-logr/logr/discard.go
/* Copyright 2020 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr // Discard returns a Logger that discards all messages logged to it. It can be // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { return New(nil) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/context_noslog.go
vendor/github.com/go-logr/logr/context_noslog.go
//go:build !go1.21 // +build !go1.21 /* Copyright 2019 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" ) // FromContext returns a Logger from ctx or an error if no Logger is found. func FromContext(ctx context.Context) (Logger, error) { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v, nil } return Logger{}, notFoundError{} } // FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this // returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v } return Discard() } // NewContext returns a new Context, derived from ctx, which carries the // provided Logger. func NewContext(ctx context.Context, logger Logger) context.Context { return context.WithValue(ctx, contextKey{}, logger) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/slogsink.go
vendor/github.com/go-logr/logr/slogsink.go
//go:build go1.21 // +build go1.21 /* Copyright 2023 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "log/slog" "runtime" "time" ) var ( _ LogSink = &slogSink{} _ CallDepthLogSink = &slogSink{} _ Underlier = &slogSink{} ) // Underlier is implemented by the LogSink returned by NewFromLogHandler. type Underlier interface { // GetUnderlying returns the Handler used by the LogSink. GetUnderlying() slog.Handler } const ( // nameKey is used to log the `WithName` values as an additional attribute. nameKey = "logger" // errKey is used to log the error parameter of Error as an additional attribute. errKey = "err" ) type slogSink struct { callDepth int name string handler slog.Handler } func (l *slogSink) Init(info RuntimeInfo) { l.callDepth = info.CallDepth } func (l *slogSink) GetUnderlying() slog.Handler { return l.handler } func (l *slogSink) WithCallDepth(depth int) LogSink { newLogger := *l newLogger.callDepth += depth return &newLogger } func (l *slogSink) Enabled(level int) bool { return l.handler.Enabled(context.Background(), slog.Level(-level)) } func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { l.log(nil, msg, slog.Level(-level), kvList...) } func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { l.log(err, msg, slog.LevelError, kvList...) 
} func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { var pcs [1]uintptr // skip runtime.Callers, this function, Info/Error, and all helper functions above that. runtime.Callers(3+l.callDepth, pcs[:]) record := slog.NewRecord(time.Now(), level, msg, pcs[0]) if l.name != "" { record.AddAttrs(slog.String(nameKey, l.name)) } if err != nil { record.AddAttrs(slog.Any(errKey, err)) } record.Add(kvList...) _ = l.handler.Handle(context.Background(), record) } func (l slogSink) WithName(name string) LogSink { if l.name != "" { l.name += "/" } l.name += name return &l } func (l slogSink) WithValues(kvList ...interface{}) LogSink { l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) return &l } func kvListToAttrs(kvList ...interface{}) []slog.Attr { // We don't need the record itself, only its Add method. record := slog.NewRecord(time.Time{}, 0, "", 0) record.Add(kvList...) attrs := make([]slog.Attr, 0, record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { attrs = append(attrs, attr) return true }) return attrs }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/context_slog.go
vendor/github.com/go-logr/logr/context_slog.go
//go:build go1.21 // +build go1.21 /* Copyright 2019 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "fmt" "log/slog" ) // FromContext returns a Logger from ctx or an error if no Logger is found. func FromContext(ctx context.Context) (Logger, error) { v := ctx.Value(contextKey{}) if v == nil { return Logger{}, notFoundError{} } switch v := v.(type) { case Logger: return v, nil case *slog.Logger: return FromSlogHandler(v.Handler()), nil default: // Not reached. panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) } } // FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { v := ctx.Value(contextKey{}) if v == nil { return nil } switch v := v.(type) { case Logger: return slog.New(ToSlogHandler(v)) case *slog.Logger: return v default: // Not reached. panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) } } // FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this // returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if logger, err := FromContext(ctx); err == nil { return logger } return Discard() } // NewContext returns a new Context, derived from ctx, which carries the // provided Logger. 
func NewContext(ctx context.Context, logger Logger) context.Context { return context.WithValue(ctx, contextKey{}, logger) } // NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the // provided slog.Logger. func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { return context.WithValue(ctx, contextKey{}, logger) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/context.go
vendor/github.com/go-logr/logr/context.go
/* Copyright 2023 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr // contextKey is how we find Loggers in a context.Context. With Go < 1.21, // the value is always a Logger value. With Go >= 1.21, the value can be a // Logger value or a slog.Logger pointer. type contextKey struct{} // notFoundError exists to carry an IsNotFound method. type notFoundError struct{} func (notFoundError) Error() string { return "no logr.Logger was present" } func (notFoundError) IsNotFound() bool { return true }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/slogr.go
vendor/github.com/go-logr/logr/slogr.go
//go:build go1.21 // +build go1.21 /* Copyright 2023 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package logr import ( "context" "log/slog" ) // FromSlogHandler returns a Logger which writes to the slog.Handler. // // The logr verbosity level is mapped to slog levels such that V(0) becomes // slog.LevelInfo and V(4) becomes slog.LevelDebug. func FromSlogHandler(handler slog.Handler) Logger { if handler, ok := handler.(*slogHandler); ok { if handler.sink == nil { return Discard() } return New(handler.sink).V(int(handler.levelBias)) } return New(&slogSink{handler: handler}) } // ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. // // The returned logger writes all records with level >= slog.LevelError as // error log entries with LogSink.Error, regardless of the verbosity level of // the Logger: // // logger := <some Logger with 0 as verbosity level> // slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) // // The level of all other records gets reduced by the verbosity // level of the Logger and the result is negated. If it happens // to be negative, then it gets replaced by zero because a LogSink // is not expected to handled negative levels: // // slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) // slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) // slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) 
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) func ToSlogHandler(logger Logger) slog.Handler { if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { return sink.handler } handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} if slogSink, ok := handler.sink.(SlogSink); ok { handler.slogSink = slogSink } return handler } // SlogSink is an optional interface that a LogSink can implement to support // logging through the slog.Logger or slog.Handler APIs better. It then should // also support special slog values like slog.Group. When used as a // slog.Handler, the advantages are: // // - stack unwinding gets avoided in favor of logging the pre-recorded PC, // as intended by slog // - proper grouping of key/value pairs via WithGroup // - verbosity levels > slog.LevelInfo can be recorded // - less overhead // // Both APIs (Logger and slog.Logger/Handler) then are supported equally // well. Developers can pick whatever API suits them better and/or mix // packages which use either API in the same binary with a common logging // implementation. // // This interface is necessary because the type implementing the LogSink // interface cannot also implement the slog.Handler interface due to the // different prototype of the common Enabled method. // // An implementation could support both interfaces in two different types, but then // additional interfaces would be needed to convert between those types in FromSlogHandler // and ToSlogHandler. type SlogSink interface { LogSink Handle(ctx context.Context, record slog.Record) error WithAttrs(attrs []slog.Attr) SlogSink WithGroup(name string) SlogSink }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/slogr/slogr.go
vendor/github.com/go-logr/logr/slogr/slogr.go
//go:build go1.21 // +build go1.21 /* Copyright 2023 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package slogr enables usage of a slog.Handler with logr.Logger as front-end // API and of a logr.LogSink through the slog.Handler and thus slog.Logger // APIs. // // See the README in the top-level [./logr] package for a discussion of // interoperability. // // Deprecated: use the main logr package instead. package slogr import ( "log/slog" "github.com/go-logr/logr" ) // NewLogr returns a logr.Logger which writes to the slog.Handler. // // Deprecated: use [logr.FromSlogHandler] instead. func NewLogr(handler slog.Handler) logr.Logger { return logr.FromSlogHandler(handler) } // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // // Deprecated: use [logr.ToSlogHandler] instead. func NewSlogHandler(logger logr.Logger) slog.Handler { return logr.ToSlogHandler(logger) } // ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // // Deprecated: use [logr.ToSlogHandler] instead. func ToSlogHandler(logger logr.Logger) slog.Handler { return logr.ToSlogHandler(logger) } // SlogSink is an optional interface that a LogSink can implement to support // logging through the slog.Logger or slog.Handler APIs better. // // Deprecated: use [logr.SlogSink] instead. type SlogSink = logr.SlogSink
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/funcr/funcr.go
vendor/github.com/go-logr/logr/funcr/funcr.go
/* Copyright 2021 The logr Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package funcr implements formatting of structured log messages and // optionally captures the call site and timestamp. // // The simplest way to use it is via its implementation of a // github.com/go-logr/logr.LogSink with output through an arbitrary // "write" function. See New and NewJSON for details. // // # Custom LogSinks // // For users who need more control, a funcr.Formatter can be embedded inside // your own custom LogSink implementation. This is useful when the LogSink // needs to implement additional methods, for example. // // # Formatting // // This will respect logr.Marshaler, fmt.Stringer, and error interfaces for // values which are being logged. When rendering a struct, funcr will use Go's // standard JSON tags (all except "string"). package funcr import ( "bytes" "encoding" "encoding/json" "fmt" "path/filepath" "reflect" "runtime" "strconv" "strings" "time" "github.com/go-logr/logr" ) // New returns a logr.Logger which is implemented by an arbitrary function. func New(fn func(prefix, args string), opts Options) logr.Logger { return logr.New(newSink(fn, NewFormatter(opts))) } // NewJSON returns a logr.Logger which is implemented by an arbitrary function // and produces JSON output. 
func NewJSON(fn func(obj string), opts Options) logr.Logger { fnWrapper := func(_, obj string) { fn(obj) } return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) } // Underlier exposes access to the underlying logging function. Since // callers only have a logr.Logger, they have to know which // implementation is in use, so this interface is less of an // abstraction and more of a way to test type conversion. type Underlier interface { GetUnderlying() func(prefix, args string) } func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { l := &fnlogger{ Formatter: formatter, write: fn, } // For skipping fnlogger.Info and fnlogger.Error. l.Formatter.AddCallDepth(1) return l } // Options carries parameters which influence the way logs are generated. type Options struct { // LogCaller tells funcr to add a "caller" key to some or all log lines. // This has some overhead, so some users might not want it. LogCaller MessageClass // LogCallerFunc tells funcr to also log the calling function name. This // has no effect if caller logging is not enabled (see Options.LogCaller). LogCallerFunc bool // LogTimestamp tells funcr to add a "ts" key to log lines. This has some // overhead, so some users might not want it. LogTimestamp bool // TimestampFormat tells funcr how to render timestamps when LogTimestamp // is enabled. If not specified, a default format will be used. For more // details, see docs for Go's time.Layout. TimestampFormat string // LogInfoLevel tells funcr what key to use to log the info level. // If not specified, the info level will be logged as "level". // If this is set to "", the info level will not be logged at all. LogInfoLevel *string // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. 
Verbosity int // RenderBuiltinsHook allows users to mutate the list of key-value pairs // while a log line is being rendered. The kvList argument follows logr // conventions - each pair of slice elements is comprised of a string key // and an arbitrary value (verified and sanitized before calling this // hook). The value returned must follow the same conventions. This hook // can be used to audit or modify logged data. For example, you might want // to prefix all of funcr's built-in keys with some string. This hook is // only called for built-in (provided by funcr itself) key-value pairs. // Equivalent hooks are offered for key-value pairs saved via // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and // for user-provided pairs (see RenderArgsHook). RenderBuiltinsHook func(kvList []any) []any // RenderValuesHook is the same as RenderBuiltinsHook, except that it is // only called for key-value pairs saved via logr.Logger.WithValues. See // RenderBuiltinsHook for more details. RenderValuesHook func(kvList []any) []any // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. RenderArgsHook func(kvList []any) []any // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct // that contains a struct, etc.) it may log. Every time it finds a struct, // slice, array, or map the depth is increased by one. When the maximum is // reached, the value will be converted to a string indicating that the max // depth has been exceeded. If this field is not specified, a default // value will be used. MaxLogDepth int } // MessageClass indicates which category or categories of messages to consider. type MessageClass int const ( // None ignores all message classes. None MessageClass = iota // All considers all message classes. All // Info only considers info messages. Info // Error only considers error messages. 
Error ) // fnlogger inherits some of its LogSink implementation from Formatter // and just needs to add some glue code. type fnlogger struct { Formatter write func(prefix, args string) } func (l fnlogger) WithName(name string) logr.LogSink { l.Formatter.AddName(name) return &l } func (l fnlogger) WithValues(kvList ...any) logr.LogSink { l.Formatter.AddValues(kvList) return &l } func (l fnlogger) WithCallDepth(depth int) logr.LogSink { l.Formatter.AddCallDepth(depth) return &l } func (l fnlogger) Info(level int, msg string, kvList ...any) { prefix, args := l.FormatInfo(level, msg, kvList) l.write(prefix, args) } func (l fnlogger) Error(err error, msg string, kvList ...any) { prefix, args := l.FormatError(err, msg, kvList) l.write(prefix, args) } func (l fnlogger) GetUnderlying() func(prefix, args string) { return l.write } // Assert conformance to the interfaces. var _ logr.LogSink = &fnlogger{} var _ logr.CallDepthLogSink = &fnlogger{} var _ Underlier = &fnlogger{} // NewFormatter constructs a Formatter which emits a JSON-like key=value format. func NewFormatter(opts Options) Formatter { return newFormatter(opts, outputKeyValue) } // NewFormatterJSON constructs a Formatter which emits strict JSON. func NewFormatterJSON(opts Options) Formatter { return newFormatter(opts, outputJSON) } // Defaults for Options. const defaultTimestampFormat = "2006-01-02 15:04:05.000000" const defaultMaxLogDepth = 16 func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.TimestampFormat == "" { opts.TimestampFormat = defaultTimestampFormat } if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } if opts.LogInfoLevel == nil { opts.LogInfoLevel = new(string) *opts.LogInfoLevel = "level" } f := Formatter{ outputFormat: outfmt, prefix: "", values: nil, depth: 0, opts: &opts, } return f } // Formatter is an opaque struct which can be embedded in a LogSink // implementation. It should be constructed with NewFormatter. 
Some of // its methods directly implement logr.LogSink. type Formatter struct { outputFormat outputFormat prefix string values []any valuesStr string depth int opts *Options groupName string // for slog groups groups []groupDef } // outputFormat indicates which outputFormat to use. type outputFormat int const ( // outputKeyValue emits a JSON-like key=value format, but not strict JSON. outputKeyValue outputFormat = iota // outputJSON emits strict JSON. outputJSON ) // groupDef represents a saved group. The values may be empty, but we don't // know if we need to render the group until the final record is rendered. type groupDef struct { name string values string } // PseudoStruct is a list of key-value pairs that gets logged as a struct. type PseudoStruct []any // render produces a log line, ready to use. func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { buf.WriteByte('{') // for the whole record } // Render builtins vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, false) // keys are ours, no need to escape continuing := len(builtins) > 0 // Turn the inner-most group into a string argsStr := func() string { buf := bytes.NewBuffer(make([]byte, 0, 1024)) vals = args if hook := f.opts.RenderArgsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, true) // escape user-provided keys return buf.String() }() // Render the stack of groups from the inside out. 
bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) for i := len(f.groups) - 1; i >= 0; i-- { grp := &f.groups[i] if grp.values == "" && bodyStr == "" { // no contents, so we must elide the whole group continue } bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) } if bodyStr != "" { if continuing { buf.WriteByte(f.comma()) } buf.WriteString(bodyStr) } if f.outputFormat == outputJSON { buf.WriteByte('}') // for the whole record } return buf.String() } // renderGroup returns a string representation of the named group with rendered // values and args. If the name is empty, this will return the values and args, // joined. If the name is not empty, this will return a single key-value pair, // where the value is a grouping of the values and args. If the values and // args are both empty, this will return an empty string, even if the name was // specified. func (f Formatter) renderGroup(name string, values string, args string) string { buf := bytes.NewBuffer(make([]byte, 0, 1024)) needClosingBrace := false if name != "" && (values != "" || args != "") { buf.WriteString(f.quoted(name, true)) // escape user-provided keys buf.WriteByte(f.colon()) buf.WriteByte('{') needClosingBrace = true } continuing := false if values != "" { buf.WriteString(values) continuing = true } if args != "" { if continuing { buf.WriteByte(f.comma()) } buf.WriteString(args) } if needClosingBrace { buf.WriteByte('}') } return buf.String() } // flatten renders a list of key-value pairs into a buffer. If escapeKeys is // true, the keys are assumed to have non-JSON-compatible characters in them // and must be evaluated for escapes. // // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). 
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { if !copied { newList := make([]any, len(kvList)) copy(newList, kvList) kvList = newList copied = true } k = f.nonStringKey(kvList[i]) kvList[i] = k } v := kvList[i+1] if i > 0 { if f.outputFormat == outputJSON { buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. buf.WriteByte(' ') } } buf.WriteString(f.quoted(k, escapeKeys)) buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } func (f Formatter) quoted(str string, escape bool) string { if escape { return prettyString(str) } // this is faster return `"` + str + `"` } func (f Formatter) comma() byte { if f.outputFormat == outputJSON { return ',' } return ' ' } func (f Formatter) colon() byte { if f.outputFormat == outputJSON { return ':' } return '=' } func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } const ( flagRawStruct = 0x1 // do not print braces on structs ) // TODO: This is not fast. Most of the overhead goes here. func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { if depth > f.opts.MaxLogDepth { return `"<max-log-depth-exceeded>"` } // Handle types that take full control of logging. if v, ok := value.(logr.Marshaler); ok { // Replace the value with what the type wants to get logged. // That then gets handled below via reflection. value = invokeMarshaler(v) } // Handle types that want to format themselves. switch v := value.(type) { case fmt.Stringer: value = invokeStringer(v) case error: value = invokeError(v) } // Handling the most common types without reflect is a small perf win. 
switch v := value.(type) { case bool: return strconv.FormatBool(v) case string: return prettyString(v) case int: return strconv.FormatInt(int64(v), 10) case int8: return strconv.FormatInt(int64(v), 10) case int16: return strconv.FormatInt(int64(v), 10) case int32: return strconv.FormatInt(int64(v), 10) case int64: return strconv.FormatInt(int64(v), 10) case uint: return strconv.FormatUint(uint64(v), 10) case uint8: return strconv.FormatUint(uint64(v), 10) case uint16: return strconv.FormatUint(uint64(v), 10) case uint32: return strconv.FormatUint(uint64(v), 10) case uint64: return strconv.FormatUint(v, 10) case uintptr: return strconv.FormatUint(uint64(v), 10) case float32: return strconv.FormatFloat(float64(v), 'f', -1, 32) case float64: return strconv.FormatFloat(v, 'f', -1, 64) case complex64: return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` case complex128: return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` case PseudoStruct: buf := bytes.NewBuffer(make([]byte, 0, 1024)) v = f.sanitize(v) if flags&flagRawStruct == 0 { buf.WriteByte('{') } for i := 0; i < len(v); i += 2 { if i > 0 { buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { buf.WriteByte('}') } return buf.String() } buf := bytes.NewBuffer(make([]byte, 0, 256)) t := reflect.TypeOf(value) if t == nil { return "null" } v := reflect.ValueOf(value) switch t.Kind() { case reflect.Bool: return strconv.FormatBool(v.Bool()) case reflect.String: return prettyString(v.String()) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return strconv.FormatInt(int64(v.Int()), 10) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return strconv.FormatUint(uint64(v.Uint()), 10) case 
reflect.Float32: return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) case reflect.Float64: return strconv.FormatFloat(v.Float(), 'f', -1, 64) case reflect.Complex64: return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` case reflect.Complex128: return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` case reflect.Struct: if flags&flagRawStruct == 0 { buf.WriteByte('{') } printComma := false // testing i>0 is not enough because of JSON omitted fields for i := 0; i < t.NumField(); i++ { fld := t.Field(i) if fld.PkgPath != "" { // reflect says this field is only defined for non-exported fields. continue } if !v.Field(i).CanInterface() { // reflect isn't clear exactly what this means, but we can't use it. continue } name := "" omitempty := false if tag, found := fld.Tag.Lookup("json"); found { if tag == "-" { continue } if comma := strings.Index(tag, ","); comma != -1 { if n := tag[:comma]; n != "" { name = n } rest := tag[comma:] if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { omitempty = true } } else { name = tag } } if omitempty && isEmpty(v.Field(i)) { continue } if printComma { buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) continue } if name == "" { name = fld.Name } // field names can't contain characters which need escaping buf.WriteString(f.quoted(name, false)) buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { buf.WriteByte('}') } return buf.String() case reflect.Slice, reflect.Array: // If this is outputing as JSON make sure this isn't really a json.RawMessage. // If so just emit "as-is" and don't pretty it as that will just print // it as [X,Y,Z,...] 
which isn't terribly useful vs the string form you really want. if f.outputFormat == outputJSON { if rm, ok := value.(json.RawMessage); ok { // If it's empty make sure we emit an empty value as the array style would below. if len(rm) > 0 { buf.Write(rm) } else { buf.WriteString("null") } return buf.String() } } buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) } buf.WriteByte(']') return buf.String() case reflect.Map: buf.WriteByte('{') // This does not sort the map keys, for best perf. it := v.MapRange() i := 0 for it.Next() { if i > 0 { buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { txt, err := m.MarshalText() if err != nil { keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error()) } else { keystr = string(txt) } keystr = prettyString(keystr) } else { // prettyWithFlags will produce already-escaped values keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) if t.Key().Kind() != reflect.String { // JSON only does string keys. Unlike Go's standard JSON, we'll // convert just about anything to a string. keystr = prettyString(keystr) } } buf.WriteString(keystr) buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } buf.WriteByte('}') return buf.String() case reflect.Ptr, reflect.Interface: if v.IsNil() { return "null" } return f.prettyWithFlags(v.Elem().Interface(), 0, depth) } return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String()) } func prettyString(s string) string { // Avoid escaping (which does allocations) if we can. 
if needsEscape(s) { return strconv.Quote(s) } b := bytes.NewBuffer(make([]byte, 0, 1024)) b.WriteByte('"') b.WriteString(s) b.WriteByte('"') return b.String() } // needsEscape determines whether the input string needs to be escaped or not, // without doing any allocations. func needsEscape(s string) bool { for _, r := range s { if !strconv.IsPrint(r) || r == '\\' || r == '"' { return true } } return false } func isEmpty(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Complex64, reflect.Complex128: return v.Complex() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func invokeMarshaler(m logr.Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("<panic: %s>", r) } }() return m.MarshalLog() } func invokeStringer(s fmt.Stringer) (ret string) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("<panic: %s>", r) } }() return s.String() } func invokeError(e error) (ret string) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("<panic: %s>", r) } }() return e.Error() } // Caller represents the original call site for a log line, after considering // logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and // Line fields will always be provided, while the Func field is optional. // Users can set the render hook fields in Options to examine logged key-value // pairs, one of which will be {"caller", Caller} if the Options.LogCaller // field is enabled for the given MessageClass. type Caller struct { // File is the basename of the file for this call site. 
File string `json:"file"` // Line is the line number in the file for this call site. Line int `json:"line"` // Func is the function name for this call site, or empty if // Options.LogCallerFunc is not enabled. Func string `json:"function,omitempty"` } func (f Formatter) caller() Caller { // +1 for this frame, +1 for Info/Error. pc, file, line, ok := runtime.Caller(f.depth + 2) if !ok { return Caller{"<unknown>", 0, ""} } fn := "" if f.opts.LogCallerFunc { if fp := runtime.FuncForPC(pc); fp != nil { fn = fp.Name() } } return Caller{filepath.Base(file), line, fn} } const noValue = "<no-value>" func (f Formatter) nonStringKey(v any) string { return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value. func (f Formatter) snippet(v any) string { const snipLen = 16 snip := f.pretty(v) if len(snip) > snipLen { snip = snip[:snipLen] } return snip } // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). func (f Formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } for i := 0; i < len(kvList); i += 2 { _, ok := kvList[i].(string) if !ok { kvList[i] = f.nonStringKey(kvList[i]) } } return kvList } // startGroup opens a new group scope (basically a sub-struct), which locks all // the current saved values and starts them anew. This is needed to satisfy // slog. func (f *Formatter) startGroup(name string) { // Unnamed groups are just inlined. if name == "" { return } n := len(f.groups) f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) // Start collecting new values. f.groupName = name f.valuesStr = "" f.values = nil } // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. 
func (f *Formatter) Init(info logr.RuntimeInfo) {
	// Add logr's own wrapper frames so caller attribution stays accurate.
	f.depth += info.CallDepth
}

// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
	return level <= f.opts.Verbosity
}

// GetDepth returns the current depth of this Formatter.  This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
	return f.depth
}

// FormatInfo renders an Info log message into strings.  The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		// In JSON mode the logger name becomes a field instead of a prefix.
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Info {
		args = append(args, "caller", f.caller())
	}
	if key := *f.opts.LogInfoLevel; key != "" {
		args = append(args, key, level)
	}
	args = append(args, "msg", msg)
	return prefix, f.render(args, kvList)
}

// FormatError renders an Error log message into strings.  The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		// In JSON mode the logger name becomes a field instead of a prefix.
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Error {
		args = append(args, "caller", f.caller())
	}
	args = append(args, "msg", msg)
	var loggableErr any
	if err != nil {
		// Log the error's message string; a nil error renders as null/<nil>.
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	return prefix, f.render(args, kvList)
}

// AddName appends the specified name.  funcr uses '/' characters to separate
// name elements.  Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
	if len(f.prefix) > 0 {
		f.prefix += "/"
	}
	f.prefix += name
}

// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []any) {
	// Three slice args forces a copy.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

	vals := f.values
	if hook := f.opts.RenderValuesHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}

	// Pre-render values, so we don't have to do it on each Info/Error call.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	f.flatten(buf, vals, true) // escape user-provided keys
	f.valuesStr = buf.String()
}

// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
	f.depth += depth
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/logr/funcr/slogsink.go
vendor/github.com/go-logr/logr/funcr/slogsink.go
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package funcr

import (
	"context"
	"log/slog"

	"github.com/go-logr/logr"
)

var _ logr.SlogSink = &fnlogger{}

const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink

// Handle implements logr.SlogSink: the slog.Record's attributes are flattened
// into a logr-style key/value list, and the record is forwarded as an Error
// call (for slog.LevelError and above) or an Info call otherwise.
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
	kvList := make([]any, 0, 2*record.NumAttrs())
	record.Attrs(func(attr slog.Attr) bool {
		kvList = attrToKVs(attr, kvList)
		return true
	})

	if record.Level >= slog.LevelError {
		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
	} else {
		level := l.levelFromSlog(record.Level)
		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
	}
	return nil
}

// WithAttrs implements logr.SlogSink by saving the attributes as values on a
// copy of the logger (l is a value receiver, so &l is a modified copy).
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
	kvList := make([]any, 0, 2*len(attrs))
	for _, attr := range attrs {
		kvList = attrToKVs(attr, kvList)
	}
	l.AddValues(kvList)
	return &l
}

// WithGroup implements logr.SlogSink by opening a new group scope on a copy
// of the logger.
func (l fnlogger) WithGroup(name string) logr.SlogSink {
	l.startGroup(name)
	return &l
}

// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
	attrVal := attr.Value.Resolve()
	if attrVal.Kind() == slog.KindGroup {
		groupVal := attrVal.Group()
		grpKVs := make([]any, 0, 2*len(groupVal))
		for _, attr := range groupVal {
			grpKVs = attrToKVs(attr, grpKVs)
		}
		if attr.Key == "" {
			// slog says we have to inline these
			kvList = append(kvList, grpKVs...)
		} else {
			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
		}
	} else if attr.Key != "" {
		kvList = append(kvList, attr.Key, attrVal.Any())
	}

	return kvList
}

// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
//	logrV0 := getMyLogger()
//	logrV2 := logrV0.V(2)
//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
//	slogV2.Info("msg")  // =~ logrV2.V(0) =~ logrV0.V(2)
//	slogv2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
	result := -level
	if result < 0 {
		result = 0 // because LogSink doesn't expect negative V levels
	}
	return int(result)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/zapr/zapr.go
vendor/github.com/go-logr/zapr/zapr.go
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Copyright 2018 Solly Ross
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package zapr defines an implementation of the github.com/go-logr/logr
// interfaces built on top of Zap (go.uber.org/zap).
//
// # Usage
//
// A new logr.Logger can be constructed from an existing zap.Logger using
// the NewLogger function:
//
//	log := zapr.NewLogger(someZapLogger)
//
// # Implementation Details
//
// For the most part, concepts in Zap correspond directly with those in
// logr.
//
// Unlike Zap, all fields *must* be in the form of sugared fields --
// it's illegal to pass a strongly-typed Zap field in a key position
// to any of the log methods.
//
// Levels in logr correspond to custom debug levels in Zap. Any given level
// in logr is represented by its inverse in zap (`zapLevel = -1*logrLevel`).
// For example V(2) is equivalent to log level -2 in Zap, while V(1) is
// equivalent to Zap's DebugLevel.
package zapr

import (
	"fmt"

	"github.com/go-logr/logr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// NB: right now, we always use the equivalent of sugared logging.
// This is necessary, since logr doesn't define non-sugared types,
// and using zap-specific non-sugared types would make uses tied
// directly to Zap.

// zapLogger is a logr.LogSink that uses Zap to log. The level has already been
// converted to a Zap level, which is to say that `logrLevel = -1*zapLevel`.
type zapLogger struct {
	// NB: this looks very similar to zap.SugaredLogger, but
	// deals with our desire to have multiple verbosity levels.
	l *zap.Logger

	// numericLevelKey controls whether the numeric logr level is
	// added to each Info log message and with which key.
	numericLevelKey string

	// errorKey is the field name used for the error in
	// Logger.Error calls.
	errorKey string

	// allowZapFields enables logging of strongly-typed Zap
	// fields. It is off by default because it breaks
	// implementation agnosticism.
	allowZapFields bool

	// panicMessages enables log messages for invalid log calls
	// that explain why a call was invalid (for example,
	// non-string key). This is enabled by default.
	panicMessages bool
}

const (
	// noLevel tells handleFields to not inject a numeric log level field.
	noLevel = -1
)

// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes
// additional pre-converted Zap fields, for use with automatically attached fields, like
// `error`.
func (zl *zapLogger) handleFields(lvl int, args []interface{}, additional ...zap.Field) []zap.Field {
	injectNumericLevel := zl.numericLevelKey != "" && lvl != noLevel

	// a slightly modified version of zap.SugaredLogger.sweetenFields
	if len(args) == 0 {
		// fast-return if we have no sugared fields and no "v" field.
		if !injectNumericLevel {
			return additional
		}
		// Slightly slower fast path when we need to inject "v".
		return append(additional, zap.Int(zl.numericLevelKey, lvl))
	}

	// unlike Zap, we can be pretty sure users aren't passing structured
	// fields (since logr has no concept of that), so guess that we need a
	// little less space.
	numFields := len(args)/2 + len(additional)
	if injectNumericLevel {
		numFields++
	}
	fields := make([]zap.Field, 0, numFields)
	if injectNumericLevel {
		fields = append(fields, zap.Int(zl.numericLevelKey, lvl))
	}
	for i := 0; i < len(args); {
		// Check just in case for strongly-typed Zap fields,
		// which might be illegal (since it breaks
		// implementation agnosticism). If disabled, we can
		// give a better error message.
		if field, ok := args[i].(zap.Field); ok {
			if zl.allowZapFields {
				fields = append(fields, field)
				i++
				continue
			}
			if zl.panicMessages {
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("strongly-typed Zap Field passed to logr", zapIt("zap field", args[i]))
			}
			break
		}

		// make sure this isn't a mismatched key
		if i == len(args)-1 {
			if zl.panicMessages {
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("odd number of arguments passed as key-value pairs for logging", zapIt("ignored key", args[i]))
			}
			break
		}

		// process a key-value pair,
		// ensuring that the key is a string
		key, val := args[i], args[i+1]
		keyStr, isString := key.(string)
		if !isString {
			// if the key isn't a string, DPanic and stop logging
			if zl.panicMessages {
				zl.l.WithOptions(zap.AddCallerSkip(1)).DPanic("non-string key argument passed to logging, ignoring all later arguments", zapIt("invalid key", key))
			}
			break
		}

		fields = append(fields, zapIt(keyStr, val))
		i += 2
	}

	return append(fields, additional...)
}

// invokeMarshaler calls m.MarshalLog while recovering from panics: a panic is
// reported as a "<field>Error" value instead of crashing the log call.
func invokeMarshaler(field string, m logr.Marshaler) (f string, ret interface{}) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("PANIC=%s", r)
			f = field + "Error"
		}
	}()
	return field, m.MarshalLog()
}

// Init implements logr.LogSink by adding logr's own call depth to the
// caller-skip count.
func (zl *zapLogger) Init(ri logr.RuntimeInfo) {
	zl.l = zl.l.WithOptions(zap.AddCallerSkip(ri.CallDepth))
}

// Zap levels are int8 - make sure we stay in bounds. logr itself should
// ensure we never get negative values.
func toZapLevel(lvl int) zapcore.Level {
	if lvl > 127 {
		lvl = 127
	}
	// zap levels are inverted.
	return 0 - zapcore.Level(lvl)
}

// Enabled implements logr.LogSink.
func (zl zapLogger) Enabled(lvl int) bool {
	return zl.l.Core().Enabled(toZapLevel(lvl))
}

// Info implements logr.LogSink.
func (zl *zapLogger) Info(lvl int, msg string, keysAndVals ...interface{}) {
	if checkedEntry := zl.l.Check(toZapLevel(lvl), msg); checkedEntry != nil {
		checkedEntry.Write(zl.handleFields(lvl, keysAndVals)...)
	}
}

// Error implements logr.LogSink; the error is attached under zl.errorKey.
func (zl *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) {
	if checkedEntry := zl.l.Check(zap.ErrorLevel, msg); checkedEntry != nil {
		checkedEntry.Write(zl.handleFields(noLevel, keysAndVals, zap.NamedError(zl.errorKey, err))...)
	}
}

// WithValues implements logr.LogSink by returning a copy whose underlying
// zap.Logger carries the extra fields.
func (zl *zapLogger) WithValues(keysAndValues ...interface{}) logr.LogSink {
	newLogger := *zl
	newLogger.l = zl.l.With(zl.handleFields(noLevel, keysAndValues)...)
	return &newLogger
}

// WithName implements logr.LogSink via zap.Logger.Named.
func (zl *zapLogger) WithName(name string) logr.LogSink {
	newLogger := *zl
	newLogger.l = zl.l.Named(name)
	return &newLogger
}

// WithCallDepth implements logr.CallDepthLogSink.
func (zl *zapLogger) WithCallDepth(depth int) logr.LogSink {
	newLogger := *zl
	newLogger.l = zl.l.WithOptions(zap.AddCallerSkip(depth))
	return &newLogger
}

// Underlier exposes access to the underlying logging implementation.  Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of way to test
// type conversion.
type Underlier interface {
	GetUnderlying() *zap.Logger
}

// GetUnderlying returns the wrapped zap.Logger.
func (zl *zapLogger) GetUnderlying() *zap.Logger {
	return zl.l
}

// NewLogger creates a new logr.Logger using the given Zap Logger to log.
func NewLogger(l *zap.Logger) logr.Logger {
	return NewLoggerWithOptions(l)
}

// NewLoggerWithOptions creates a new logr.Logger using the given Zap Logger to
// log and applies additional options.
func NewLoggerWithOptions(l *zap.Logger, opts ...Option) logr.Logger {
	// creates a new logger skipping one level of callstack
	log := l.WithOptions(zap.AddCallerSkip(1))
	zl := &zapLogger{
		l: log,
	}
	// Defaults; the options applied below may override them.
	zl.errorKey = "error"
	zl.panicMessages = true
	for _, option := range opts {
		option(zl)
	}
	return logr.New(zl)
}

// Option is one additional parameter for NewLoggerWithOptions.
type Option func(*zapLogger)

// LogInfoLevel controls whether a numeric log level is added to
// Info log message. The empty string disables this, a non-empty
// string is the key for the additional field. Errors and
// internal panic messages do not have a log level and thus
// are always logged without this extra field.
func LogInfoLevel(key string) Option {
	return func(zl *zapLogger) {
		zl.numericLevelKey = key
	}
}

// ErrorKey replaces the default "error" field name used for the error
// in Logger.Error calls.
func ErrorKey(key string) Option {
	return func(zl *zapLogger) {
		zl.errorKey = key
	}
}

// AllowZapFields controls whether strongly-typed Zap fields may
// be passed instead of a key/value pair. This is disabled by
// default because it breaks implementation agnosticism.
func AllowZapFields(allowed bool) Option {
	return func(zl *zapLogger) {
		zl.allowZapFields = allowed
	}
}

// DPanicOnBugs controls whether extra log messages are emitted for
// invalid log calls with zap's DPanic method. Depending on the
// configuration of the zap logger, the program then panics after
// emitting the log message which is useful in development because
// such invalid log calls are bugs in the program. The log messages
// explain why a call was invalid (for example, non-string
// key). Emitting them is enabled by default.
func DPanicOnBugs(enabled bool) Option {
	return func(zl *zapLogger) {
		zl.panicMessages = enabled
	}
}

// Compile-time checks that zapLogger satisfies the logr sink interfaces.
var _ logr.LogSink = &zapLogger{}
var _ logr.CallDepthLogSink = &zapLogger{}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/zapr/slogzapr.go
vendor/github.com/go-logr/zapr/slogzapr.go
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package zapr

import (
	"context"
	"log/slog"
	"runtime"

	"github.com/go-logr/logr/slogr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

var _ slogr.SlogSink = &zapLogger{}

// Handle implements slogr.SlogSink: it maps a slog.Record onto a Zap entry,
// preserving the record's timestamp and caller PC and inlining its attributes.
func (zl *zapLogger) Handle(_ context.Context, record slog.Record) error {
	zapLevel := zap.InfoLevel
	intLevel := 0
	isError := false
	switch {
	case record.Level >= slog.LevelError:
		zapLevel = zap.ErrorLevel
		isError = true
	case record.Level >= slog.LevelWarn:
		zapLevel = zap.WarnLevel
	case record.Level >= 0:
		// Already set above -> info.
	default:
		// Negative slog levels become negative Zap levels (logr verbosity).
		zapLevel = zapcore.Level(record.Level)
		intLevel = int(-zapLevel)
	}

	if checkedEntry := zl.l.Check(zapLevel, record.Message); checkedEntry != nil {
		checkedEntry.Time = record.Time
		checkedEntry.Caller = pcToCallerEntry(record.PC)
		var fieldsBuffer [2]zap.Field
		fields := fieldsBuffer[:0]
		if !isError && zl.numericLevelKey != "" {
			// Record verbosity for info entries.
			fields = append(fields, zap.Int(zl.numericLevelKey, intLevel))
		}
		// Inline all attributes.
		fields = append(fields, zap.Inline(zapcore.ObjectMarshalerFunc(func(enc zapcore.ObjectEncoder) error {
			record.Attrs(func(attr slog.Attr) bool {
				encodeSlog(enc, attr)
				return true
			})
			return nil
		})))
		checkedEntry.Write(fields...)
	}
	return nil
}

// encodeSlog writes a single slog.Attr into the Zap encoder, resolving
// LogValuer values and recursing into groups. Empty attributes are skipped.
func encodeSlog(enc zapcore.ObjectEncoder, attr slog.Attr) {
	if attr.Equal(slog.Attr{}) {
		// Ignore empty attribute.
		return
	}

	// Check in order of expected frequency, most common ones first.
	//
	// Usage statistics for parameters from Kubernetes 152876a3e,
	// calculated with k/k/test/integration/logs/benchmark:
	//
	// kube-controller-manager -v10:
	// strings: 10043 (85%)
	// with API objects: 2 (0% of all arguments)
	// types and their number of usage: NodeStatus:2
	// numbers: 792 (6%)
	// ObjectRef: 292 (2%)
	// others: 595 (5%)
	//
	// kube-scheduler -v10:
	// strings: 1325 (40%)
	// with API objects: 109 (3% of all arguments)
	// types and their number of usage: PersistentVolume:50 PersistentVolumeClaim:59
	// numbers: 473 (14%)
	// ObjectRef: 1305 (39%)
	// others: 176 (5%)
	kind := attr.Value.Kind()
	switch kind {
	case slog.KindString:
		enc.AddString(attr.Key, attr.Value.String())
	case slog.KindLogValuer:
		// This includes klog.KObj.
		encodeSlog(enc, slog.Attr{
			Key:   attr.Key,
			Value: attr.Value.Resolve(),
		})
	case slog.KindInt64:
		enc.AddInt64(attr.Key, attr.Value.Int64())
	case slog.KindUint64:
		enc.AddUint64(attr.Key, attr.Value.Uint64())
	case slog.KindFloat64:
		enc.AddFloat64(attr.Key, attr.Value.Float64())
	case slog.KindBool:
		enc.AddBool(attr.Key, attr.Value.Bool())
	case slog.KindDuration:
		enc.AddDuration(attr.Key, attr.Value.Duration())
	case slog.KindTime:
		enc.AddTime(attr.Key, attr.Value.Time())
	case slog.KindGroup:
		attrs := attr.Value.Group()
		if attr.Key == "" {
			// Inline group.
			for _, attr := range attrs {
				encodeSlog(enc, attr)
			}
			return
		}
		if len(attrs) == 0 {
			// Ignore empty group.
			return
		}
		_ = enc.AddObject(attr.Key, marshalAttrs(attrs))
	default:
		// We have to go through reflection in zap.Any to get support
		// for e.g. fmt.Stringer.
		zap.Any(attr.Key, attr.Value.Any()).AddTo(enc)
	}
}

// marshalAttrs adapts a slice of slog.Attr to zapcore.ObjectMarshaler.
type marshalAttrs []slog.Attr

// MarshalLogObject implements zapcore.ObjectMarshaler.
func (attrs marshalAttrs) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	for _, attr := range attrs {
		encodeSlog(enc, attr)
	}
	return nil
}

var _ zapcore.ObjectMarshaler = marshalAttrs(nil)

// pcToCallerEntry converts a program counter from a slog.Record into a Zap
// caller entry; it returns the zero entry when no caller info is available.
func pcToCallerEntry(pc uintptr) zapcore.EntryCaller {
	if pc == 0 {
		return zapcore.EntryCaller{}
	}
	// Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70
	fs := runtime.CallersFrames([]uintptr{pc})
	f, _ := fs.Next()
	if f.File == "" {
		return zapcore.EntryCaller{}
	}
	return zapcore.EntryCaller{
		Defined:  true,
		PC:       pc,
		File:     f.File,
		Line:     f.Line,
		Function: f.Function,
	}
}

// WithAttrs implements slogr.SlogSink by inlining the attributes on a copy.
func (zl *zapLogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink {
	newLogger := *zl
	newLogger.l = newLogger.l.With(zap.Inline(marshalAttrs(attrs)))
	return &newLogger
}

// WithGroup implements slogr.SlogSink via zap.Namespace on a copy.
func (zl *zapLogger) WithGroup(name string) slogr.SlogSink {
	newLogger := *zl
	newLogger.l = newLogger.l.With(zap.Namespace(name))
	return &newLogger
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/zapr/zapr_slog.go
vendor/github.com/go-logr/zapr/zapr_slog.go
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package zapr

import (
	"log/slog"

	"github.com/go-logr/logr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// zapIt converts one key/value pair into a zap.Field (go1.21+ variant).
// Values implementing logr.Marshaler or slog.LogValuer are resolved to their
// replacement representation first; resolved slog.Values are encoded inline
// via encodeSlog, everything else falls back to zap.Any.
func zapIt(field string, val interface{}) zap.Field {
	switch valTyped := val.(type) {
	case logr.Marshaler:
		// Handle types that implement logr.Marshaler: log the replacement
		// object instead of the original one.
		field, val = invokeMarshaler(field, valTyped)
	case slog.LogValuer:
		// The same for slog.LogValuer. We let slog.Value handle
		// potential panics and recursion.
		val = slog.AnyValue(val).Resolve()
	}
	if slogValue, ok := val.(slog.Value); ok {
		return zap.Inline(zapcore.ObjectMarshalerFunc(func(enc zapcore.ObjectEncoder) error {
			encodeSlog(enc, slog.Attr{Key: field, Value: slogValue})
			return nil
		}))
	}
	return zap.Any(field, val)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/go-logr/zapr/zapr_noslog.go
vendor/github.com/go-logr/zapr/zapr_noslog.go
//go:build !go1.21
// +build !go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package zapr

import (
	"github.com/go-logr/logr"
	"go.uber.org/zap"
)

// zapIt converts one key/value pair into a zap.Field (pre-go1.21 variant,
// without slog support).
func zapIt(field string, val interface{}) zap.Field {
	// Handle types that implement logr.Marshaler: log the replacement
	// object instead of the original one.
	if marshaler, ok := val.(logr.Marshaler); ok {
		field, val = invokeMarshaler(field, marshaler)
	}
	return zap.Any(field, val)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/tail.go
vendor/github.com/nxadm/tail/tail.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

//nxadm/tail provides a Go library that emulates the features of the BSD `tail`
//program. The library comes with full support for truncation/move detection as
//it is designed to work with log rotation tools. The library works on all
//operating systems supported by Go, including POSIX systems like Linux and
//*BSD, and MS Windows. Go 1.9 is the oldest compiler release supported.
package tail

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/nxadm/tail/ratelimiter"
	"github.com/nxadm/tail/util"
	"github.com/nxadm/tail/watch"
	"gopkg.in/tomb.v1"
)

var (
	// ErrStop is returned when the tail of a file has been marked to be stopped.
	ErrStop = errors.New("tail should now stop")
)

// Line carries one line read from the tailed file together with positional
// metadata and any error encountered while reading it.
type Line struct {
	Text     string    // The contents of the file
	Num      int       // The line number
	SeekInfo SeekInfo  // SeekInfo
	Time     time.Time // Present time
	Err      error     // Error from tail
}

// Deprecated: this function is no longer used internally and it has little or no
// use in the API. As such, it will be removed from the API in a future major
// release.
//
// NewLine returns a pointer to a Line struct.
func NewLine(text string, lineNum int) *Line {
	return &Line{text, lineNum, SeekInfo{}, time.Now(), nil}
}

// SeekInfo represents arguments to io.Seek. See: https://golang.org/pkg/io/#SectionReader.Seek
type SeekInfo struct {
	Offset int64
	Whence int
}

// logger abstracts the subset of *log.Logger methods used by this package so
// callers can plug in their own implementation via Config.Logger.
type logger interface {
	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})
	Fatalln(v ...interface{})
	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
	Panicln(v ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}

// Config is used to specify how a file must be tailed.
type Config struct {
	// File-specific
	Location  *SeekInfo // Tail from this location. If nil, start at the beginning of the file
	ReOpen    bool      // Reopen recreated files (tail -F)
	MustExist bool      // Fail early if the file does not exist
	Poll      bool      // Poll for file changes instead of using the default inotify
	Pipe      bool      // The file is a named pipe (mkfifo)

	// Generic IO
	Follow        bool // Continue looking for new lines (tail -f)
	MaxLineSize   int  // If non-zero, split longer lines into multiple lines
	CompleteLines bool // Only return complete lines (that end with "\n" or EOF when Follow is false)

	// Optionally, use a ratelimiter (e.g. created by the ratelimiter/NewLeakyBucket function)
	RateLimiter *ratelimiter.LeakyBucket

	// Optionally use a Logger. When nil, the Logger is set to tail.DefaultLogger.
	// To disable logging, set it to tail.DiscardingLogger
	Logger logger
}

// Tail tracks a single tailed file; read lines are delivered on the Lines
// channel. It embeds tomb.Tomb for lifecycle management.
type Tail struct {
	Filename string     // The filename
	Lines    chan *Line // A consumable channel of *Line
	Config              // Tail configuration

	file    *os.File
	reader  *bufio.Reader
	lineNum int

	// lineBuf accumulates partial lines when CompleteLines is enabled.
	lineBuf *strings.Builder

	watcher watch.FileWatcher
	changes *watch.FileChanges

	tomb.Tomb // provides: Done, Kill, Dying

	// lk guards reader access from Tell vs. the tailing goroutine.
	lk sync.Mutex
}

var (
	// DefaultLogger logs to os.Stderr and it is used when Config.Logger == nil
	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
	// DiscardingLogger can be used to disable logging output
	DiscardingLogger = log.New(ioutil.Discard, "", 0)
)

// TailFile begins tailing the file and returns a pointer to a Tail struct
// and an error. An output stream is made available via the Tail.Lines
// channel (e.g. to be looped and printed). To handle errors during tailing,
// after finishing reading from the Lines channel, invoke the `Wait` or `Err`
// method on the returned *Tail.
func TailFile(filename string, config Config) (*Tail, error) {
	// ReOpen (tail -F) only makes sense while following; fail fast otherwise.
	if config.ReOpen && !config.Follow {
		util.Fatal("cannot set ReOpen without Follow.")
	}

	t := &Tail{
		Filename: filename,
		Lines:    make(chan *Line),
		Config:   config,
	}

	if config.CompleteLines {
		t.lineBuf = new(strings.Builder)
	}

	// when Logger was not specified in config, use default logger
	if t.Logger == nil {
		t.Logger = DefaultLogger
	}

	if t.Poll {
		t.watcher = watch.NewPollingFileWatcher(filename)
	} else {
		t.watcher = watch.NewInotifyFileWatcher(filename)
	}

	if t.MustExist {
		var err error
		t.file, err = OpenFile(t.Filename)
		if err != nil {
			return nil, err
		}
	}

	// The tail loop runs in the background; errors surface via Wait/Err.
	go t.tailFileSync()

	return t, nil
}

// Tell returns the file's current position, like stdio's ftell() and an error.
// Beware that this value may not be completely accurate because one line from
// the chan(tail.Lines) may have been read already.
func (tail *Tail) Tell() (offset int64, err error) {
	if tail.file == nil {
		return
	}
	offset, err = tail.file.Seek(0, io.SeekCurrent)
	if err != nil {
		return
	}

	tail.lk.Lock()
	defer tail.lk.Unlock()
	if tail.reader == nil {
		return
	}

	// The OS-level offset is ahead of what the caller has consumed by
	// whatever is still buffered in the reader; subtract it out.
	offset -= int64(tail.reader.Buffered())
	return
}

// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
	tail.Kill(nil)
	return tail.Wait()
}

// StopAtEOF stops tailing as soon as the end of the file is reached.
// The function returns an error, if any.
func (tail *Tail) StopAtEOF() error {
	tail.Kill(errStopAtEOF)
	return tail.Wait()
}

// errStopAtEOF is the sentinel Kill reason used by StopAtEOF; tailFileSync
// checks for it to keep draining until EOF before exiting.
var errStopAtEOF = errors.New("tail: stop at eof")

func (tail *Tail) close() {
	close(tail.Lines)
	tail.closeFile()
}

func (tail *Tail) closeFile() {
	if tail.file != nil {
		tail.file.Close()
		tail.file = nil
	}
}

// reopen (re)opens tail.Filename, blocking until the file exists when it is
// currently absent. Used both for the deferred first open and for rotation.
func (tail *Tail) reopen() error {
	if tail.lineBuf != nil {
		tail.lineBuf.Reset()
	}
	tail.closeFile()
	tail.lineNum = 0
	for {
		var err error
		tail.file, err = OpenFile(tail.Filename)
		if err != nil {
			if os.IsNotExist(err) {
				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
					if err == tomb.ErrDying {
						return err
					}
					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
				}
				continue
			}
			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
		}
		break
	}
	return nil
}

// readLine reads one line from the buffered reader. With CompleteLines set,
// partial reads (no trailing newline yet) are accumulated in lineBuf until
// a newline arrives.
func (tail *Tail) readLine() (string, error) {
	tail.lk.Lock()
	line, err := tail.reader.ReadString('\n')
	tail.lk.Unlock()

	newlineEnding := strings.HasSuffix(line, "\n")
	line = strings.TrimRight(line, "\n")

	// if we don't have to handle incomplete lines, we can return the line as-is
	if !tail.Config.CompleteLines {
		// Note ReadString "returns the data read before the error" in
		// case of an error, including EOF, so we return it as is. The
		// caller is expected to process it if err is EOF.
		return line, err
	}

	if _, err := tail.lineBuf.WriteString(line); err != nil {
		return line, err
	}

	if newlineEnding {
		line = tail.lineBuf.String()
		tail.lineBuf.Reset()
		return line, nil
	} else {
		if tail.Config.Follow {
			line = ""
		}
		return line, io.EOF
	}
}

// tailFileSync is the main loop, run in its own goroutine by TailFile. It
// reads lines, forwards them on tail.Lines, waits on the watcher at EOF, and
// reports termination through the embedded tomb.
func (tail *Tail) tailFileSync() {
	defer tail.Done()
	defer tail.close()

	if !tail.MustExist {
		// deferred first open.
		err := tail.reopen()
		if err != nil {
			if err != tomb.ErrDying {
				tail.Kill(err)
			}
			return
		}
	}

	// Seek to requested location on first open of the file.
	if tail.Location != nil {
		_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
		if err != nil {
			tail.Killf("Seek error on %s: %s", tail.Filename, err)
			return
		}
	}

	tail.openReader()

	// Read line by line.
	for {
		// do not seek in named pipes
		if !tail.Pipe {
			// grab the position in case we need to back up in the event of a half-line
			if _, err := tail.Tell(); err != nil {
				tail.Kill(err)
				return
			}
		}

		line, err := tail.readLine()

		// Process `line` even if err is EOF.
		if err == nil {
			cooloff := !tail.sendLine(line)
			if cooloff {
				// Wait a second before seeking till the end of
				// file when rate limit is reached.
				msg := ("Too much log activity; waiting a second before resuming tailing")
				offset, _ := tail.Tell()
				tail.Lines <- &Line{msg, tail.lineNum, SeekInfo{Offset: offset}, time.Now(), errors.New(msg)}
				select {
				case <-time.After(time.Second):
				case <-tail.Dying():
					return
				}
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}
		} else if err == io.EOF {
			if !tail.Follow {
				if line != "" {
					tail.sendLine(line)
				}
				return
			}

			if tail.Follow && line != "" {
				tail.sendLine(line)
				if err := tail.seekEnd(); err != nil {
					tail.Kill(err)
					return
				}
			}

			// When EOF is reached, wait for more data to become
			// available. Wait strategy is based on the `tail.watcher`
			// implementation (inotify or polling).
			err := tail.waitForChanges()
			if err != nil {
				if err != ErrStop {
					tail.Kill(err)
				}
				return
			}
		} else {
			// non-EOF error
			tail.Killf("Error reading %s: %s", tail.Filename, err)
			return
		}

		select {
		case <-tail.Dying():
			// StopAtEOF keeps the loop running until EOF is hit;
			// any other Kill reason terminates immediately.
			if tail.Err() == errStopAtEOF {
				continue
			}
			return
		default:
		}
	}
}

// waitForChanges waits until the file has been appended, deleted,
// moved or truncated. When moved or deleted - the file will be
// reopened if ReOpen is true. Truncated files are always reopened.
func (tail *Tail) waitForChanges() error {
	// Lazily (re)subscribe to change events, anchored at the current offset
	// so the watcher can detect truncation.
	if tail.changes == nil {
		pos, err := tail.file.Seek(0, io.SeekCurrent)
		if err != nil {
			return err
		}
		tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
		if err != nil {
			return err
		}
	}

	select {
	case <-tail.changes.Modified:
		return nil
	case <-tail.changes.Deleted:
		tail.changes = nil
		if tail.ReOpen {
			// XXX: we must not log from a library.
			tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
			if err := tail.reopen(); err != nil {
				return err
			}
			tail.Logger.Printf("Successfully reopened %s", tail.Filename)
			tail.openReader()
			return nil
		}
		tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
		return ErrStop
	case <-tail.changes.Truncated:
		// Always reopen truncated files (Follow is true)
		tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
		if err := tail.reopen(); err != nil {
			return err
		}
		tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
		tail.openReader()
		return nil
	case <-tail.Dying():
		return ErrStop
	}
}

// openReader (re)creates the buffered reader over the current file handle,
// sized to MaxLineSize when set.
func (tail *Tail) openReader() {
	tail.lk.Lock()
	if tail.MaxLineSize > 0 {
		// add 2 to account for newline characters
		tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
	} else {
		tail.reader = bufio.NewReader(tail.file)
	}
	tail.lk.Unlock()
}

func (tail *Tail) seekEnd() error {
	return tail.seekTo(SeekInfo{Offset: 0, Whence: io.SeekEnd})
}

func (tail *Tail) seekTo(pos SeekInfo) error {
	_, err := tail.file.Seek(pos.Offset, pos.Whence)
	if err != nil {
		return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
	}
	// Reset the read buffer whenever the file is re-seek'ed
	tail.reader.Reset(tail.file)
	return nil
}

// sendLine sends the line(s) to Lines channel, splitting longer lines
// if necessary. Return false if rate limit is reached.
func (tail *Tail) sendLine(line string) bool {
	now := time.Now()
	lines := []string{line}

	// Split longer lines
	if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
		lines = util.PartitionString(line, tail.MaxLineSize)
	}

	for _, line := range lines {
		tail.lineNum++
		offset, _ := tail.Tell()
		select {
		case tail.Lines <- &Line{line, tail.lineNum, SeekInfo{Offset: offset}, now, nil}:
		case <-tail.Dying():
			// Shutting down: report success so the caller does not
			// enter the rate-limit cooloff path.
			return true
		}
	}

	if tail.Config.RateLimiter != nil {
		ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
		if !ok {
			tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.", tail.Filename)
			return false
		}
	}

	return true
}

// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. Linux kernel may not
// automatically remove inotify watches after the process exits.
// If you plan to re-read a file, don't call Cleanup in between.
func (tail *Tail) Cleanup() {
	watch.Cleanup(tail.Filename)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/tail_posix.go
vendor/github.com/nxadm/tail/tail_posix.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // +build !windows package tail import ( "os" ) // Deprecated: this function is only useful internally and, as such, // it will be removed from the API in a future major release. // // OpenFile proxies a os.Open call for a file so it can be correctly tailed // on POSIX and non-POSIX OSes like MS Windows. func OpenFile(name string) (file *os.File, err error) { return os.Open(name) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/tail_windows.go
vendor/github.com/nxadm/tail/tail_windows.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // +build windows package tail import ( "os" "github.com/nxadm/tail/winfile" ) // Deprecated: this function is only useful internally and, as such, // it will be removed from the API in a future major release. // // OpenFile proxies a os.Open call for a file so it can be correctly tailed // on POSIX and non-POSIX OSes like MS Windows. func OpenFile(name string) (file *os.File, err error) { return winfile.OpenFile(name, os.O_RDONLY, 0) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/util/util.go
vendor/github.com/nxadm/tail/util/util.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package util

import (
	"fmt"
	"log"
	"os"
	"runtime/debug"
)

// Logger wraps *log.Logger so the package can expose a shared instance.
type Logger struct {
	*log.Logger
}

// LOGGER is the package-wide logger used by Fatal, writing to stderr.
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}

// Fatal is like panic except it displays only the current goroutine's stack,
// then exits the process with status 1.
func Fatal(format string, v ...interface{}) {
	// https://github.com/nxadm/log/blob/master/log.go#L45
	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
	os.Exit(1)
}

// PartitionString partitions the string into chunks of given size,
// with the last chunk of variable size. Panics if chunkSize <= 0.
func PartitionString(s string, chunkSize int) []string {
	if chunkSize <= 0 {
		panic("invalid chunkSize")
	}
	length := len(s)
	chunks := 1 + length/chunkSize
	start := 0
	end := chunkSize
	parts := make([]string, 0, chunks)
	for {
		if end > length {
			end = length
		}
		parts = append(parts, s[start:end])
		if end == length {
			break
		}
		start, end = end, end+chunkSize
	}
	return parts
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/ratelimiter/storage.go
vendor/github.com/nxadm/tail/ratelimiter/storage.go
package ratelimiter

// Storage is the interface implemented by leaky-bucket backends
// (e.g. the in-memory backend in this package).
type Storage interface {
	// GetBucketFor returns the bucket stored under the given key.
	GetBucketFor(string) (*LeakyBucket, error)
	// SetBucketFor stores a copy of the bucket under the given key.
	SetBucketFor(string, LeakyBucket) error
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/ratelimiter/memory.go
vendor/github.com/nxadm/tail/ratelimiter/memory.go
package ratelimiter

import (
	"errors"
	"time"
)

const (
	// GC_SIZE is the store size above which SetBucketFor attempts a GC pass.
	GC_SIZE int = 100
	// GC_PERIOD is the minimum interval between GC passes.
	GC_PERIOD time.Duration = 60 * time.Second
)

// Memory is an in-memory Storage backend for leaky buckets.
// NOTE(review): access to store is not synchronized; presumably callers
// serialize access — confirm before using from multiple goroutines.
type Memory struct {
	store           map[string]LeakyBucket
	lastGCCollected time.Time
}

// NewMemory returns an empty in-memory bucket store.
func NewMemory() *Memory {
	m := new(Memory)
	m.store = make(map[string]LeakyBucket)
	m.lastGCCollected = time.Now()
	return m
}

// GetBucketFor returns a pointer to a copy of the bucket stored under key,
// or an error on a miss.
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {

	bucket, ok := m.store[key]
	if !ok {
		return nil, errors.New("miss")
	}

	return &bucket, nil
}

// SetBucketFor stores bucket under key, garbage-collecting drained buckets
// first when the store has grown past GC_SIZE.
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {

	if len(m.store) > GC_SIZE {
		m.GarbageCollect()
	}

	m.store[key] = bucket

	return nil
}

// GarbageCollect removes fully drained buckets, at most once per GC_PERIOD.
func (m *Memory) GarbageCollect() {
	now := time.Now()

	// rate limit GC to once per minute
	if now.Unix() >= m.lastGCCollected.Add(GC_PERIOD).Unix() {
		for key, bucket := range m.store {
			// if the bucket is drained, then GC
			if bucket.DrainedAt().Unix() < now.Unix() {
				delete(m.store, key)
			}
		}

		m.lastGCCollected = now
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
vendor/github.com/nxadm/tail/ratelimiter/leakybucket.go
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter

import (
	"time"
)

// LeakyBucket models a bucket that leaks Fill at a constant rate of one
// unit per LeakInterval.
type LeakyBucket struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
	Now          func() time.Time // clock function, injectable for testing
}

// NewLeakyBucket returns an empty bucket of the given size and leak rate.
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
	bucket := LeakyBucket{
		Size:         size,
		Fill:         0,
		LeakInterval: leakInterval,
		Now:          time.Now,
		Lastupdate:   time.Now(),
	}

	return &bucket
}

// updateFill leaks the amount accumulated since Lastupdate, clamping at zero.
func (b *LeakyBucket) updateFill() {
	now := b.Now()
	if b.Fill > 0 {
		elapsed := now.Sub(b.Lastupdate)

		b.Fill -= float64(elapsed) / float64(b.LeakInterval)
		if b.Fill < 0 {
			b.Fill = 0
		}
	}
	b.Lastupdate = now
}

// Pour adds amount to the bucket, returning false (and leaving the fill
// unchanged) if the bucket would overflow.
func (b *LeakyBucket) Pour(amount uint16) bool {
	b.updateFill()

	var newfill float64 = b.Fill + float64(amount)

	if newfill > float64(b.Size) {
		return false
	}

	b.Fill = newfill

	return true
}

// DrainedAt returns the time at which this bucket will be completely drained.
func (b *LeakyBucket) DrainedAt() time.Time {
	return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}

// TimeToDrain returns the duration until this bucket is completely drained.
func (b *LeakyBucket) TimeToDrain() time.Duration {
	return b.DrainedAt().Sub(b.Now())
}

// TimeSinceLastUpdate returns the elapsed time since the last fill update.
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
	return b.Now().Sub(b.Lastupdate)
}

// LeakyBucketSer is the serializable form of LeakyBucket (drops the Now
// clock function, which cannot be serialized).
type LeakyBucketSer struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
}

// Serialise converts the bucket to its serializable form.
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
	bucket := LeakyBucketSer{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
	}

	return &bucket
}

// DeSerialise restores a bucket from its serializable form, reattaching the
// real clock.
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
	bucket := LeakyBucket{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
		Now:          time.Now,
	}

	return &bucket
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/winfile/winfile.go
vendor/github.com/nxadm/tail/winfile/winfile.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// +build windows

package winfile

import (
	"os"
	"syscall"
	"unsafe"
)

// issue also described here
//https://codereview.appspot.com/8203043/

// Open mirrors syscall.Open but adds FILE_SHARE_DELETE to the share mode so
// that a tailed file can be rotated (moved/deleted) while a handle is open.
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
	if len(path) == 0 {
		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
	}
	pathp, err := syscall.UTF16PtrFromString(path)
	if err != nil {
		return syscall.InvalidHandle, err
	}
	// Map POSIX-style O_* flags onto Win32 access rights.
	var access uint32
	switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
	case syscall.O_RDONLY:
		access = syscall.GENERIC_READ
	case syscall.O_WRONLY:
		access = syscall.GENERIC_WRITE
	case syscall.O_RDWR:
		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
	}
	if mode&syscall.O_CREAT != 0 {
		access |= syscall.GENERIC_WRITE
	}
	if mode&syscall.O_APPEND != 0 {
		access &^= syscall.GENERIC_WRITE
		access |= syscall.FILE_APPEND_DATA
	}
	// FILE_SHARE_DELETE is the difference from stdlib os.Open on Windows.
	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
	var sa *syscall.SecurityAttributes
	if mode&syscall.O_CLOEXEC == 0 {
		sa = makeInheritSa()
	}
	var createmode uint32
	switch {
	case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
		createmode = syscall.CREATE_NEW
	case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
		createmode = syscall.CREATE_ALWAYS
	case mode&syscall.O_CREAT == syscall.O_CREAT:
		createmode = syscall.OPEN_ALWAYS
	case mode&syscall.O_TRUNC == syscall.O_TRUNC:
		createmode = syscall.TRUNCATE_EXISTING
	default:
		createmode = syscall.OPEN_EXISTING
	}
	h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
	return h, e
}

// makeInheritSa returns security attributes marking the handle inheritable.
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
func makeInheritSa() *syscall.SecurityAttributes {
	var sa syscall.SecurityAttributes
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	return &sa
}

// OpenFile is the os.OpenFile analogue built on Open above.
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
	r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
	if e != nil {
		return nil, e
	}
	return os.NewFile(uintptr(r), name), nil
}

// syscallMode converts an os.FileMode into the syscall permission bits.
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	// No mapping for Go's ModeTemporary (plan9 only).
	return
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/watch/polling.go
vendor/github.com/nxadm/tail/watch/polling.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"os"
	"runtime"
	"time"

	"github.com/nxadm/tail/util"
	"gopkg.in/tomb.v1"
)

// PollingFileWatcher polls the file for changes.
type PollingFileWatcher struct {
	Filename string
	Size     int64
}

// NewPollingFileWatcher returns a watcher that stats filename periodically.
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
	fw := &PollingFileWatcher{filename, 0}
	return fw
}

// POLL_DURATION is the interval between polls; set in init below.
var POLL_DURATION time.Duration

// BlockUntilExists stats the file every POLL_DURATION until it exists or the
// tomb starts dying.
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	for {
		if _, err := os.Stat(fw.Filename); err == nil {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
		select {
		case <-time.After(POLL_DURATION):
			continue
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

// ChangeEvents starts a polling goroutine that reports modification,
// truncation and deletion on the returned FileChanges channels. pos is the
// caller's current offset, used as the baseline for truncation detection.
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	origFi, err := os.Stat(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	var prevModTime time.Time

	// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
	// the fatal (below) with tomb's Kill.

	fw.Size = pos

	go func() {
		prevSize := fw.Size
		for {
			select {
			case <-t.Dying():
				return
			default:
			}

			time.Sleep(POLL_DURATION)
			fi, err := os.Stat(fw.Filename)
			if err != nil {
				// Windows cannot delete a file if a handle is still open (tail keeps one open)
				// so it gives access denied to anything trying to read it until all handles are released.
				if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
					// File does not exist (has been deleted).
					changes.NotifyDeleted()
					return
				}

				// XXX: report this error back to the user
				util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
			}

			// File got moved/renamed?
			if !os.SameFile(origFi, fi) {
				changes.NotifyDeleted()
				return
			}

			// File got truncated?
			fw.Size = fi.Size()
			if prevSize > 0 && prevSize > fw.Size {
				changes.NotifyTruncated()
				prevSize = fw.Size
				continue
			}
			// File got bigger?
			if prevSize > 0 && prevSize < fw.Size {
				changes.NotifyModified()
				prevSize = fw.Size
				continue
			}
			prevSize = fw.Size

			// File was appended to (changed)?
			modTime := fi.ModTime()
			if modTime != prevModTime {
				prevModTime = modTime
				changes.NotifyModified()
			}
		}
	}()

	return changes, nil
}

func init() {
	POLL_DURATION = 250 * time.Millisecond
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/watch/inotify.go
vendor/github.com/nxadm/tail/watch/inotify.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/nxadm/tail/util"

	"github.com/fsnotify/fsnotify"
	"gopkg.in/tomb.v1"
)

// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
	Filename string
	Size     int64
}

// NewInotifyFileWatcher returns an inotify-backed watcher for filename.
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
	fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
	return fw
}

// BlockUntilExists watches the parent directory until the file is created,
// or the tomb starts dying.
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	err := WatchCreate(fw.Filename)
	if err != nil {
		return err
	}
	defer RemoveWatchCreate(fw.Filename)

	// Do a real check now as the file might have been created before
	// calling `WatchFlags` above.
	if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
		// file exists, or stat returned an error.
		return err
	}

	events := Events(fw.Filename)

	for {
		select {
		case evt, ok := <-events:
			if !ok {
				return fmt.Errorf("inotify watcher has been closed")
			}
			// Compare absolute paths: the event may carry a relative name.
			evtName, err := filepath.Abs(evt.Name)
			if err != nil {
				return err
			}
			fwFilename, err := filepath.Abs(fw.Filename)
			if err != nil {
				return err
			}
			if evtName == fwFilename {
				return nil
			}
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	panic("unreachable")
}

// ChangeEvents starts a goroutine that translates inotify events into the
// returned FileChanges notifications. pos is the caller's current offset,
// used as the baseline for truncation detection.
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	err := Watch(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	fw.Size = pos

	go func() {

		events := Events(fw.Filename)

		for {
			prevSize := fw.Size

			var evt fsnotify.Event
			var ok bool

			select {
			case evt, ok = <-events:
				if !ok {
					RemoveWatch(fw.Filename)
					return
				}
			case <-t.Dying():
				RemoveWatch(fw.Filename)
				return
			}

			switch {
			case evt.Op&fsnotify.Remove == fsnotify.Remove:
				fallthrough

			case evt.Op&fsnotify.Rename == fsnotify.Rename:
				RemoveWatch(fw.Filename)
				changes.NotifyDeleted()
				return

			// With an open fd, unlink(fd) - inotify returns IN_ATTRIB (==fsnotify.Chmod)
			case evt.Op&fsnotify.Chmod == fsnotify.Chmod:
				fallthrough

			case evt.Op&fsnotify.Write == fsnotify.Write:
				fi, err := os.Stat(fw.Filename)
				if err != nil {
					if os.IsNotExist(err) {
						RemoveWatch(fw.Filename)
						changes.NotifyDeleted()
						return
					}
					// XXX: report this error back to the user
					util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
				}

				// Shrinking size means the file was truncated.
				fw.Size = fi.Size()
				if prevSize > 0 && prevSize > fw.Size {
					changes.NotifyTruncated()
				} else {
					changes.NotifyModified()
				}
				// NOTE(review): this assignment is dead — prevSize is
				// re-read from fw.Size at the top of each iteration.
				prevSize = fw.Size
			}
		}
	}()

	return changes, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/watch/watch.go
vendor/github.com/nxadm/tail/watch/watch.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package watch

import "gopkg.in/tomb.v1"

// FileWatcher monitors file-level events. It is implemented by both the
// inotify-based and the polling-based watcher in this package.
type FileWatcher interface {
	// BlockUntilExists blocks until the file comes into existence.
	BlockUntilExists(*tomb.Tomb) error

	// ChangeEvents reports on changes to a file, be it modification,
	// deletion, renames or truncations. Returned FileChanges group of
	// channels will be closed, thus become unusable, after a deletion
	// or truncation event.
	// In order to properly report truncations, ChangeEvents requires
	// the caller to pass their current offset in the file.
	ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/watch/inotify_tracker.go
vendor/github.com/nxadm/tail/watch/inotify_tracker.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail // Copyright (c) 2015 HPE Software Inc. All rights reserved. // Copyright (c) 2013 ActiveState Software Inc. All rights reserved. package watch import ( "log" "os" "path/filepath" "sync" "syscall" "github.com/nxadm/tail/util" "github.com/fsnotify/fsnotify" ) type InotifyTracker struct { mux sync.Mutex watcher *fsnotify.Watcher chans map[string]chan fsnotify.Event done map[string]chan bool watchNums map[string]int watch chan *watchInfo remove chan *watchInfo error chan error } type watchInfo struct { op fsnotify.Op fname string } func (this *watchInfo) isCreate() bool { return this.op == fsnotify.Create } var ( // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used shared *InotifyTracker // these are used to ensure the shared InotifyTracker is run exactly once once = sync.Once{} goRun = func() { shared = &InotifyTracker{ mux: sync.Mutex{}, chans: make(map[string]chan fsnotify.Event), done: make(map[string]chan bool), watchNums: make(map[string]int), watch: make(chan *watchInfo), remove: make(chan *watchInfo), error: make(chan error), } go shared.run() } logger = log.New(os.Stderr, "", log.LstdFlags) ) // Watch signals the run goroutine to begin watching the input filename func Watch(fname string) error { return watch(&watchInfo{ fname: fname, }) } // Watch create signals the run goroutine to begin watching the input filename // if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate func WatchCreate(fname string) error { return watch(&watchInfo{ op: fsnotify.Create, fname: fname, }) } func watch(winfo *watchInfo) error { // start running the shared InotifyTracker if not already running once.Do(goRun) winfo.fname = filepath.Clean(winfo.fname) shared.watch <- winfo return <-shared.error } // RemoveWatch signals the run goroutine to remove the watch for the input filename func RemoveWatch(fname string) error { return remove(&watchInfo{ fname: 
fname, }) } // RemoveWatch create signals the run goroutine to remove the watch for the input filename func RemoveWatchCreate(fname string) error { return remove(&watchInfo{ op: fsnotify.Create, fname: fname, }) } func remove(winfo *watchInfo) error { // start running the shared InotifyTracker if not already running once.Do(goRun) winfo.fname = filepath.Clean(winfo.fname) shared.mux.Lock() done := shared.done[winfo.fname] if done != nil { delete(shared.done, winfo.fname) close(done) } shared.mux.Unlock() shared.remove <- winfo return <-shared.error } // Events returns a channel to which FileEvents corresponding to the input filename // will be sent. This channel will be closed when removeWatch is called on this // filename. func Events(fname string) <-chan fsnotify.Event { shared.mux.Lock() defer shared.mux.Unlock() return shared.chans[fname] } // Cleanup removes the watch for the input filename if necessary. func Cleanup(fname string) error { return RemoveWatch(fname) } // watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating // a new Watcher if the previous Watcher was closed. func (shared *InotifyTracker) addWatch(winfo *watchInfo) error { shared.mux.Lock() defer shared.mux.Unlock() if shared.chans[winfo.fname] == nil { shared.chans[winfo.fname] = make(chan fsnotify.Event) } if shared.done[winfo.fname] == nil { shared.done[winfo.fname] = make(chan bool) } fname := winfo.fname if winfo.isCreate() { // Watch for new files to be created in the parent directory. fname = filepath.Dir(fname) } var err error // already in inotify watch if shared.watchNums[fname] == 0 { err = shared.watcher.Add(fname) } if err == nil { shared.watchNums[fname]++ } return err } // removeWatch calls fsnotify.RemoveWatch for the input filename and closes the // corresponding events channel. 
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) error { shared.mux.Lock() ch := shared.chans[winfo.fname] if ch != nil { delete(shared.chans, winfo.fname) close(ch) } fname := winfo.fname if winfo.isCreate() { // Watch for new files to be created in the parent directory. fname = filepath.Dir(fname) } shared.watchNums[fname]-- watchNum := shared.watchNums[fname] if watchNum == 0 { delete(shared.watchNums, fname) } shared.mux.Unlock() var err error // If we were the last ones to watch this file, unsubscribe from inotify. // This needs to happen after releasing the lock because fsnotify waits // synchronously for the kernel to acknowledge the removal of the watch // for this file, which causes us to deadlock if we still held the lock. if watchNum == 0 { err = shared.watcher.Remove(fname) } return err } // sendEvent sends the input event to the appropriate Tail. func (shared *InotifyTracker) sendEvent(event fsnotify.Event) { name := filepath.Clean(event.Name) shared.mux.Lock() ch := shared.chans[name] done := shared.done[name] shared.mux.Unlock() if ch != nil && done != nil { select { case ch <- event: case <-done: } } } // run starts the goroutine in which the shared struct reads events from its // Watcher's Event channel and sends the events to the appropriate Tail. func (shared *InotifyTracker) run() { watcher, err := fsnotify.NewWatcher() if err != nil { util.Fatal("failed to create Watcher") } shared.watcher = watcher for { select { case winfo := <-shared.watch: shared.error <- shared.addWatch(winfo) case winfo := <-shared.remove: shared.error <- shared.removeWatch(winfo) case event, open := <-shared.watcher.Events: if !open { return } shared.sendEvent(event) case err, open := <-shared.watcher.Errors: if !open { return } else if err != nil { sysErr, ok := err.(*os.SyscallError) if !ok || sysErr.Err != syscall.EINTR { logger.Printf("Error in Watcher Error channel: %s", err) } } } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/nxadm/tail/watch/filechanges.go
vendor/github.com/nxadm/tail/watch/filechanges.go
// Copyright (c) 2019 FOSS contributors of https://github.com/nxadm/tail
package watch

// FileChanges groups the notification channels a watcher uses to tell a
// tail about events on its file. Each channel has capacity one so repeated
// events collapse into a single pending notification.
type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}

// NewFileChanges returns a FileChanges with all channels allocated.
func NewFileChanges() *FileChanges {
	return &FileChanges{
		Modified:  make(chan bool, 1),
		Truncated: make(chan bool, 1),
		Deleted:   make(chan bool, 1),
	}
}

// NotifyModified signals that the file was modified.
func (fc *FileChanges) NotifyModified() {
	notifyNonBlocking(fc.Modified)
}

// NotifyTruncated signals that the file was truncated.
func (fc *FileChanges) NotifyTruncated() {
	notifyNonBlocking(fc.Truncated)
}

// NotifyDeleted signals that the file was deleted or renamed.
func (fc *FileChanges) NotifyDeleted() {
	notifyNonBlocking(fc.Deleted)
}

// notifyNonBlocking sends on ch only when no notification is already
// pending, so subsequent notifications are compressed into one and the
// sender never blocks on an inattentive receiver.
func notifyNonBlocking(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/jwriter/writer.go
vendor/github.com/mailru/easyjson/jwriter/writer.go
// Package jwriter contains a JSON writer.
package jwriter

import (
	"io"
	"strconv"
	"unicode/utf8"

	"github.com/mailru/easyjson/buffer"
)

// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
// Flags field in Writer is used to set and pass them around.
type Flags int

const (
	NilMapAsEmpty   Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
	NilSliceAsEmpty                   // Encode nil slice as '[]' rather than 'null'.
)

// Writer is a JSON writer.
type Writer struct {
	Flags Flags

	Error        error         // First error encountered; once set, subsequent writes are no-ops for error-reporting methods.
	Buffer       buffer.Buffer // Accumulated output.
	NoEscapeHTML bool
}

// Size returns the size of the data that was written out.
func (w *Writer) Size() int {
	return w.Buffer.Size()
}

// DumpTo outputs the data to given io.Writer, resetting the buffer.
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
	return w.Buffer.DumpTo(out)
}

// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
// as argument that it will try to reuse.
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
	if w.Error != nil {
		return nil, w.Error
	}

	return w.Buffer.BuildBytes(reuse...), nil
}

// ReadCloser returns an io.ReadCloser that can be used to read the data.
// ReadCloser also resets the buffer.
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
	if w.Error != nil {
		return nil, w.Error
	}

	return w.Buffer.ReadCloser(), nil
}

// RawByte appends raw binary data to the buffer.
func (w *Writer) RawByte(c byte) {
	w.Buffer.AppendByte(c)
}

// RawString appends a raw string to the buffer without quoting or escaping.
func (w *Writer) RawString(s string) {
	w.Buffer.AppendString(s)
}

// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
// calling with results of MarshalJSON-like functions.
func (w *Writer) Raw(data []byte, err error) { switch { case w.Error != nil: return case err != nil: w.Error = err case len(data) > 0: w.Buffer.AppendBytes(data) default: w.RawString("null") } } // RawText encloses raw binary data in quotes and appends in to the buffer. // Useful for calling with results of MarshalText-like functions. func (w *Writer) RawText(data []byte, err error) { switch { case w.Error != nil: return case err != nil: w.Error = err case len(data) > 0: w.String(string(data)) default: w.RawString("null") } } // Base64Bytes appends data to the buffer after base64 encoding it func (w *Writer) Base64Bytes(data []byte) { if data == nil { w.Buffer.AppendString("null") return } w.Buffer.AppendByte('"') w.base64(data) w.Buffer.AppendByte('"') } func (w *Writer) Uint8(n uint8) { w.Buffer.EnsureSpace(3) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) } func (w *Writer) Uint16(n uint16) { w.Buffer.EnsureSpace(5) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) } func (w *Writer) Uint32(n uint32) { w.Buffer.EnsureSpace(10) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) } func (w *Writer) Uint(n uint) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) } func (w *Writer) Uint64(n uint64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) } func (w *Writer) Int8(n int8) { w.Buffer.EnsureSpace(4) w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) } func (w *Writer) Int16(n int16) { w.Buffer.EnsureSpace(6) w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) } func (w *Writer) Int32(n int32) { w.Buffer.EnsureSpace(11) w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) } func (w *Writer) Int(n int) { w.Buffer.EnsureSpace(21) w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) } func (w *Writer) Int64(n int64) { w.Buffer.EnsureSpace(21) w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) } func (w *Writer) 
Uint8Str(n uint8) { w.Buffer.EnsureSpace(3) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Uint16Str(n uint16) { w.Buffer.EnsureSpace(5) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Uint32Str(n uint32) { w.Buffer.EnsureSpace(10) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) UintStr(n uint) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Uint64Str(n uint64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) UintptrStr(n uintptr) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Int8Str(n int8) { w.Buffer.EnsureSpace(4) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Int16Str(n int16) { w.Buffer.EnsureSpace(6) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Int32Str(n int32) { w.Buffer.EnsureSpace(11) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) IntStr(n int) { w.Buffer.EnsureSpace(21) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, 
int64(n), 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Int64Str(n int64) { w.Buffer.EnsureSpace(21) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Float32(n float32) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) } func (w *Writer) Float32Str(n float32) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Float64(n float64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) } func (w *Writer) Float64Str(n float64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = append(w.Buffer.Buf, '"') w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) w.Buffer.Buf = append(w.Buffer.Buf, '"') } func (w *Writer) Bool(v bool) { w.Buffer.EnsureSpace(5) if v { w.Buffer.Buf = append(w.Buffer.Buf, "true"...) } else { w.Buffer.Buf = append(w.Buffer.Buf, "false"...) } } const chars = "0123456789abcdef" func getTable(falseValues ...int) [128]bool { table := [128]bool{} for i := 0; i < 128; i++ { table[i] = true } for _, v := range falseValues { table[v] = false } return table } var ( htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') ) func (w *Writer) String(s string) { w.Buffer.AppendByte('"') // Portions of the string that contain no escapes are appended as // byte slices. 
p := 0 // last non-escape symbol escapeTable := &htmlEscapeTable if w.NoEscapeHTML { escapeTable = &htmlNoEscapeTable } for i := 0; i < len(s); { c := s[i] if c < utf8.RuneSelf { if escapeTable[c] { // single-width character, no escaping is required i++ continue } w.Buffer.AppendString(s[p:i]) switch c { case '\t': w.Buffer.AppendString(`\t`) case '\r': w.Buffer.AppendString(`\r`) case '\n': w.Buffer.AppendString(`\n`) case '\\': w.Buffer.AppendString(`\\`) case '"': w.Buffer.AppendString(`\"`) default: w.Buffer.AppendString(`\u00`) w.Buffer.AppendByte(chars[c>>4]) w.Buffer.AppendByte(chars[c&0xf]) } i++ p = i continue } // broken utf runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) if runeValue == utf8.RuneError && runeWidth == 1 { w.Buffer.AppendString(s[p:i]) w.Buffer.AppendString(`\ufffd`) i++ p = i continue } // jsonp stuff - tab separator and line separator if runeValue == '\u2028' || runeValue == '\u2029' { w.Buffer.AppendString(s[p:i]) w.Buffer.AppendString(`\u202`) w.Buffer.AppendByte(chars[runeValue&0xf]) i += runeWidth p = i continue } i += runeWidth } w.Buffer.AppendString(s[p:]) w.Buffer.AppendByte('"') } const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" const padChar = '=' func (w *Writer) base64(in []byte) { if len(in) == 0 { return } w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) si := 0 n := (len(in) / 3) * 3 for si < n { // Convert 3x 8bit source bytes into 4 bytes val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) si += 3 } remain := len(in) - si if remain == 0 { return } // Add the remaining small block val := uint(in[si+0]) << 16 if remain == 2 { val |= uint(in[si+1]) << 8 } w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) switch remain { case 2: w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) case 1: w.Buffer.Buf = 
append(w.Buffer.Buf, byte(padChar), byte(padChar)) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
// This file will only be included to the build if neither // easyjson_nounsafe nor appengine build tag is set. See README notes // for more details. //+build !easyjson_nounsafe //+build !appengine package jlexer import ( "reflect" "unsafe" ) // bytesToStr creates a string pointing at the slice to avoid copying. // // Warning: the string returned by the function should be used with care, as the whole input data // chunk may be either blocked from being freed by GC because of a single string or the buffer.Data // may be garbage-collected even when the string exists. func bytesToStr(data []byte) string { h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} return *(*string)(unsafe.Pointer(&shdr)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/jlexer/error.go
vendor/github.com/mailru/easyjson/jlexer/error.go
package jlexer import "fmt" // LexerError implements the error interface and represents all possible errors that can be // generated during parsing the JSON data. type LexerError struct { Reason string Offset int Data string } func (l *LexerError) Error() string { return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/jlexer/lexer.go
vendor/github.com/mailru/easyjson/jlexer/lexer.go
// Package jlexer contains a JSON lexer implementation. // // It is expected that it is mostly used with generated parser code, so the interface is tuned // for a parser that knows what kind of data is expected. package jlexer import ( "bytes" "encoding/base64" "encoding/json" "errors" "fmt" "io" "strconv" "unicode" "unicode/utf16" "unicode/utf8" "github.com/josharian/intern" ) // tokenKind determines type of a token. type tokenKind byte const ( tokenUndef tokenKind = iota // No token. tokenDelim // Delimiter: one of '{', '}', '[' or ']'. tokenString // A string literal, e.g. "abc\u1234" tokenNumber // Number literal, e.g. 1.5e5 tokenBool // Boolean literal: true or false. tokenNull // null keyword. ) // token describes a single token: type, position in the input and value. type token struct { kind tokenKind // Type of a token. boolValue bool // Value if a boolean literal token. byteValueCloned bool // true if byteValue was allocated and does not refer to original json body byteValue []byte // Raw value of a token. delimValue byte } // Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. type Lexer struct { Data []byte // Input data given to the lexer. start int // Start of the current token. pos int // Current unscanned position in the input stream. token token // Last scanned token, if token.kind != tokenUndef. firstElement bool // Whether current element is the first in array or an object. wantSep byte // A comma or a colon character, which need to occur before a token. UseMultipleErrors bool // If we want to use multiple errors. fatalError error // Fatal error occurred during lexing. It is usually a syntax error. multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. } // FetchToken scans the input for the next token. 
func (r *Lexer) FetchToken() { r.token.kind = tokenUndef r.start = r.pos // Check if r.Data has r.pos element // If it doesn't, it mean corrupted input data if len(r.Data) < r.pos { r.errParse("Unexpected end of data") return } // Determine the type of a token by skipping whitespace and reading the // first character. for _, c := range r.Data[r.pos:] { switch c { case ':', ',': if r.wantSep == c { r.pos++ r.start++ r.wantSep = 0 } else { r.errSyntax() } case ' ', '\t', '\r', '\n': r.pos++ r.start++ case '"': if r.wantSep != 0 { r.errSyntax() } r.token.kind = tokenString r.fetchString() return case '{', '[': if r.wantSep != 0 { r.errSyntax() } r.firstElement = true r.token.kind = tokenDelim r.token.delimValue = r.Data[r.pos] r.pos++ return case '}', ']': if !r.firstElement && (r.wantSep != ',') { r.errSyntax() } r.wantSep = 0 r.token.kind = tokenDelim r.token.delimValue = r.Data[r.pos] r.pos++ return case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': if r.wantSep != 0 { r.errSyntax() } r.token.kind = tokenNumber r.fetchNumber() return case 'n': if r.wantSep != 0 { r.errSyntax() } r.token.kind = tokenNull r.fetchNull() return case 't': if r.wantSep != 0 { r.errSyntax() } r.token.kind = tokenBool r.token.boolValue = true r.fetchTrue() return case 'f': if r.wantSep != 0 { r.errSyntax() } r.token.kind = tokenBool r.token.boolValue = false r.fetchFalse() return default: r.errSyntax() return } } r.fatalError = io.EOF return } // isTokenEnd returns true if the char can follow a non-delimiter token func isTokenEnd(c byte) bool { return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' } // fetchNull fetches and checks remaining bytes of null keyword. 
func (r *Lexer) fetchNull() { r.pos += 4 if r.pos > len(r.Data) || r.Data[r.pos-3] != 'u' || r.Data[r.pos-2] != 'l' || r.Data[r.pos-1] != 'l' || (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { r.pos -= 4 r.errSyntax() } } // fetchTrue fetches and checks remaining bytes of true keyword. func (r *Lexer) fetchTrue() { r.pos += 4 if r.pos > len(r.Data) || r.Data[r.pos-3] != 'r' || r.Data[r.pos-2] != 'u' || r.Data[r.pos-1] != 'e' || (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { r.pos -= 4 r.errSyntax() } } // fetchFalse fetches and checks remaining bytes of false keyword. func (r *Lexer) fetchFalse() { r.pos += 5 if r.pos > len(r.Data) || r.Data[r.pos-4] != 'a' || r.Data[r.pos-3] != 'l' || r.Data[r.pos-2] != 's' || r.Data[r.pos-1] != 'e' || (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { r.pos -= 5 r.errSyntax() } } // fetchNumber scans a number literal token. func (r *Lexer) fetchNumber() { hasE := false afterE := false hasDot := false r.pos++ for i, c := range r.Data[r.pos:] { switch { case c >= '0' && c <= '9': afterE = false case c == '.' && !hasDot: hasDot = true case (c == 'e' || c == 'E') && !hasE: hasE = true hasDot = true afterE = true case (c == '+' || c == '-') && afterE: afterE = false default: r.pos += i if !isTokenEnd(c) { r.errSyntax() } else { r.token.byteValue = r.Data[r.start:r.pos] } return } } r.pos = len(r.Data) r.token.byteValue = r.Data[r.start:] } // findStringLen tries to scan into the string literal for ending quote char to determine required size. // The size will be exact if no escapes are present and may be inexact if there are escaped chars. func findStringLen(data []byte) (isValid bool, length int) { for { idx := bytes.IndexByte(data, '"') if idx == -1 { return false, len(data) } if idx == 0 || (idx > 0 && data[idx-1] != '\\') { return true, length + idx } // count \\\\\\\ sequences. 
even number of slashes means quote is not really escaped cnt := 1 for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { cnt++ } if cnt%2 == 0 { return true, length + idx } length += idx + 1 data = data[idx+1:] } } // unescapeStringToken performs unescaping of string token. // if no escaping is needed, original string is returned, otherwise - a new one allocated func (r *Lexer) unescapeStringToken() (err error) { data := r.token.byteValue var unescapedData []byte for { i := bytes.IndexByte(data, '\\') if i == -1 { break } escapedRune, escapedBytes, err := decodeEscape(data[i:]) if err != nil { r.errParse(err.Error()) return err } if unescapedData == nil { unescapedData = make([]byte, 0, len(r.token.byteValue)) } var d [4]byte s := utf8.EncodeRune(d[:], escapedRune) unescapedData = append(unescapedData, data[:i]...) unescapedData = append(unescapedData, d[:s]...) data = data[i+escapedBytes:] } if unescapedData != nil { r.token.byteValue = append(unescapedData, data...) r.token.byteValueCloned = true } return } // getu4 decodes \uXXXX from the beginning of s, returning the hex value, // or it returns -1. func getu4(s []byte) rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { return -1 } var val rune for i := 2; i < len(s) && i < 6; i++ { var v byte c := s[i] switch c { case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': v = c - '0' case 'a', 'b', 'c', 'd', 'e', 'f': v = c - 'a' + 10 case 'A', 'B', 'C', 'D', 'E', 'F': v = c - 'A' + 10 default: return -1 } val <<= 4 val |= rune(v) } return val } // decodeEscape processes a single escape sequence and returns number of bytes processed. 
func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { if len(data) < 2 { return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") } c := data[1] switch c { case '"', '/', '\\': return rune(c), 2, nil case 'b': return '\b', 2, nil case 'f': return '\f', 2, nil case 'n': return '\n', 2, nil case 'r': return '\r', 2, nil case 't': return '\t', 2, nil case 'u': rr := getu4(data) if rr < 0 { return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") } read := 6 if utf16.IsSurrogate(rr) { rr1 := getu4(data[read:]) if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { read += 6 rr = dec } else { rr = unicode.ReplacementChar } } return rr, read, nil } return 0, 0, errors.New("incorrectly escaped bytes") } // fetchString scans a string literal token. func (r *Lexer) fetchString() { r.pos++ data := r.Data[r.pos:] isValid, length := findStringLen(data) if !isValid { r.pos += length r.errParse("unterminated string literal") return } r.token.byteValue = data[:length] r.pos += length + 1 // skip closing '"' as well } // scanToken scans the next token if no token is currently available in the lexer. func (r *Lexer) scanToken() { if r.token.kind != tokenUndef || r.fatalError != nil { return } r.FetchToken() } // consume resets the current token to allow scanning the next one. func (r *Lexer) consume() { r.token.kind = tokenUndef r.token.byteValueCloned = false r.token.delimValue = 0 } // Ok returns true if no error (including io.EOF) was encountered during scanning. func (r *Lexer) Ok() bool { return r.fatalError == nil } const maxErrorContextLen = 13 func (r *Lexer) errParse(what string) { if r.fatalError == nil { var str string if len(r.Data)-r.pos <= maxErrorContextLen { str = string(r.Data) } else { str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." 
} r.fatalError = &LexerError{ Reason: what, Offset: r.pos, Data: str, } } } func (r *Lexer) errSyntax() { r.errParse("syntax error") } func (r *Lexer) errInvalidToken(expected string) { if r.fatalError != nil { return } if r.UseMultipleErrors { r.pos = r.start r.consume() r.SkipRecursive() switch expected { case "[": r.token.delimValue = ']' r.token.kind = tokenDelim case "{": r.token.delimValue = '}' r.token.kind = tokenDelim } r.addNonfatalError(&LexerError{ Reason: fmt.Sprintf("expected %s", expected), Offset: r.start, Data: string(r.Data[r.start:r.pos]), }) return } var str string if len(r.token.byteValue) <= maxErrorContextLen { str = string(r.token.byteValue) } else { str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." } r.fatalError = &LexerError{ Reason: fmt.Sprintf("expected %s", expected), Offset: r.pos, Data: str, } } func (r *Lexer) GetPos() int { return r.pos } // Delim consumes a token and verifies that it is the given delimiter. func (r *Lexer) Delim(c byte) { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.delimValue != c { r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. r.errInvalidToken(string([]byte{c})) } else { r.consume() } } // IsDelim returns true if there was no scanning error and next token is the given delimiter. func (r *Lexer) IsDelim(c byte) bool { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } return !r.Ok() || r.token.delimValue == c } // Null verifies that the next token is null and consumes it. func (r *Lexer) Null() { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenNull { r.errInvalidToken("null") } r.consume() } // IsNull returns true if the next token is a null keyword. func (r *Lexer) IsNull() bool { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } return r.Ok() && r.token.kind == tokenNull } // Skip skips a single token. 
func (r *Lexer) Skip() { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } r.consume() } // SkipRecursive skips next array or object completely, or just skips a single token if not // an array/object. // // Note: no syntax validation is performed on the skipped data. func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte startPos := r.start switch r.token.delimValue { case '{': start, end = '{', '}' case '[': start, end = '[', ']' default: r.consume() return } r.consume() level := 1 inQuotes := false wasEscape := false for i, c := range r.Data[r.pos:] { switch { case c == start && !inQuotes: level++ case c == end && !inQuotes: level-- if level == 0 { r.pos += i + 1 if !json.Valid(r.Data[startPos:r.pos]) { r.pos = len(r.Data) r.fatalError = &LexerError{ Reason: "skipped array/object json value is invalid", Offset: r.pos, Data: string(r.Data[r.pos:]), } } return } case c == '\\' && inQuotes: wasEscape = !wasEscape continue case c == '"' && inQuotes: inQuotes = wasEscape case c == '"': inQuotes = true } wasEscape = false } r.pos = len(r.Data) r.fatalError = &LexerError{ Reason: "EOF reached while skipping array/object or token", Offset: r.pos, Data: string(r.Data[r.pos:]), } } // Raw fetches the next item recursively as a data slice func (r *Lexer) Raw() []byte { r.SkipRecursive() if !r.Ok() { return nil } return r.Data[r.start:r.pos] } // IsStart returns whether the lexer is positioned at the start // of an input string. func (r *Lexer) IsStart() bool { return r.pos == 0 } // Consumed reads all remaining bytes from the input, publishing an error if // there is anything but whitespace remaining. 
func (r *Lexer) Consumed() { if r.pos > len(r.Data) || !r.Ok() { return } for _, c := range r.Data[r.pos:] { if c != ' ' && c != '\t' && c != '\r' && c != '\n' { r.AddError(&LexerError{ Reason: "invalid character '" + string(c) + "' after top-level value", Offset: r.pos, Data: string(r.Data[r.pos:]), }) return } r.pos++ r.start++ } } func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") return "", nil } if !skipUnescape { if err := r.unescapeStringToken(); err != nil { r.errInvalidToken("string") return "", nil } } bytes := r.token.byteValue ret := bytesToStr(r.token.byteValue) r.consume() return ret, bytes } // UnsafeString returns the string value if the token is a string literal. // // Warning: returned string may point to the input buffer, so the string should not outlive // the input buffer. Intended pattern of usage is as an argument to a switch statement. func (r *Lexer) UnsafeString() string { ret, _ := r.unsafeString(false) return ret } // UnsafeBytes returns the byte slice if the token is a string literal. func (r *Lexer) UnsafeBytes() []byte { _, ret := r.unsafeString(false) return ret } // UnsafeFieldName returns current member name string token func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { ret, _ := r.unsafeString(skipUnescape) return ret } // String reads a string literal. func (r *Lexer) String() string { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") return "" } if err := r.unescapeStringToken(); err != nil { r.errInvalidToken("string") return "" } var ret string if r.token.byteValueCloned { ret = bytesToStr(r.token.byteValue) } else { ret = string(r.token.byteValue) } r.consume() return ret } // StringIntern reads a string literal, and performs string interning on it. 
func (r *Lexer) StringIntern() string { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") return "" } if err := r.unescapeStringToken(); err != nil { r.errInvalidToken("string") return "" } ret := intern.Bytes(r.token.byteValue) r.consume() return ret } // Bytes reads a string literal and base64 decodes it into a byte slice. func (r *Lexer) Bytes() []byte { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenString { r.errInvalidToken("string") return nil } if err := r.unescapeStringToken(); err != nil { r.errInvalidToken("string") return nil } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { r.fatalError = &LexerError{ Reason: err.Error(), } return nil } r.consume() return ret[:n] } // Bool reads a true or false boolean keyword. func (r *Lexer) Bool() bool { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenBool { r.errInvalidToken("bool") return false } ret := r.token.boolValue r.consume() return ret } func (r *Lexer) number() string { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() || r.token.kind != tokenNumber { r.errInvalidToken("number") return "" } ret := bytesToStr(r.token.byteValue) r.consume() return ret } func (r *Lexer) Uint8() uint8 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 8) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return uint8(n) } func (r *Lexer) Uint16() uint16 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 16) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return uint16(n) } func (r *Lexer) Uint32() uint32 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 32) 
if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return uint32(n) } func (r *Lexer) Uint64() uint64 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return n } func (r *Lexer) Uint() uint { return uint(r.Uint64()) } func (r *Lexer) Int8() int8 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 8) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return int8(n) } func (r *Lexer) Int16() int16 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 16) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return int16(n) } func (r *Lexer) Int32() int32 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 32) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return int32(n) } func (r *Lexer) Int64() int64 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return n } func (r *Lexer) Int() int { return int(r.Int64()) } func (r *Lexer) Uint8Str() uint8 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 8) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return uint8(n) } func (r *Lexer) Uint16Str() uint16 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 16) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return uint16(n) } func (r *Lexer) Uint32Str() uint32 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 32) if err 
!= nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return uint32(n) } func (r *Lexer) Uint64Str() uint64 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseUint(s, 10, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return n } func (r *Lexer) UintStr() uint { return uint(r.Uint64Str()) } func (r *Lexer) UintptrStr() uintptr { return uintptr(r.Uint64Str()) } func (r *Lexer) Int8Str() int8 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 8) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return int8(n) } func (r *Lexer) Int16Str() int16 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 16) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return int16(n) } func (r *Lexer) Int32Str() int32 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 32) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return int32(n) } func (r *Lexer) Int64Str() int64 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseInt(s, 10, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return n } func (r *Lexer) IntStr() int { return int(r.Int64Str()) } func (r *Lexer) Float32() float32 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseFloat(s, 32) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return float32(n) } func (r *Lexer) Float32Str() float32 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseFloat(s, 32) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: 
err.Error(), Data: string(b), }) } return float32(n) } func (r *Lexer) Float64() float64 { s := r.number() if !r.Ok() { return 0 } n, err := strconv.ParseFloat(s, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: s, }) } return n } func (r *Lexer) Float64Str() float64 { s, b := r.unsafeString(false) if !r.Ok() { return 0 } n, err := strconv.ParseFloat(s, 64) if err != nil { r.addNonfatalError(&LexerError{ Offset: r.start, Reason: err.Error(), Data: string(b), }) } return n } func (r *Lexer) Error() error { return r.fatalError } func (r *Lexer) AddError(e error) { if r.fatalError == nil { r.fatalError = e } } func (r *Lexer) AddNonFatalError(e error) { r.addNonfatalError(&LexerError{ Offset: r.start, Data: string(r.Data[r.start:r.pos]), Reason: e.Error(), }) } func (r *Lexer) addNonfatalError(err *LexerError) { if r.UseMultipleErrors { // We don't want to add errors with the same offset. if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { return } r.multipleErrors = append(r.multipleErrors, err) return } r.fatalError = err } func (r *Lexer) GetNonFatalErrors() []*LexerError { return r.multipleErrors } // JsonNumber fetches and json.Number from 'encoding/json' package. // Both int, float or string, contains them are valid values func (r *Lexer) JsonNumber() json.Number { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } if !r.Ok() { r.errInvalidToken("json.Number") return json.Number("") } switch r.token.kind { case tokenString: return json.Number(r.String()) case tokenNumber: return json.Number(r.Raw()) case tokenNull: r.Null() return json.Number("") default: r.errSyntax() return json.Number("") } } // Interface fetches an interface{} analogous to the 'encoding/json' package. 
func (r *Lexer) Interface() interface{} {
	if r.token.kind == tokenUndef && r.Ok() {
		r.FetchToken()
	}
	if !r.Ok() {
		return nil
	}
	// Scalar tokens map directly onto Go types.
	switch r.token.kind {
	case tokenString:
		return r.String()
	case tokenNumber:
		return r.Float64()
	case tokenBool:
		return r.Bool()
	case tokenNull:
		r.Null()
		return nil
	}

	if r.token.delimValue == '{' {
		r.consume()

		// Objects decode recursively into map[string]interface{}.
		ret := map[string]interface{}{}
		for !r.IsDelim('}') {
			key := r.String()
			r.WantColon()
			ret[key] = r.Interface()
			r.WantComma()
		}
		r.Delim('}')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	} else if r.token.delimValue == '[' {
		r.consume()

		// Arrays decode recursively into []interface{}.
		ret := []interface{}{}
		for !r.IsDelim(']') {
			ret = append(ret, r.Interface())
			r.WantComma()
		}
		r.Delim(']')

		if r.Ok() {
			return ret
		} else {
			return nil
		}
	}
	r.errSyntax()
	return nil
}

// WantComma requires a comma to be present before fetching next token.
func (r *Lexer) WantComma() {
	r.wantSep = ','
	r.firstElement = false
}

// WantColon requires a colon to be present before fetching next token.
func (r *Lexer) WantColon() {
	r.wantSep = ':'
	r.firstElement = false
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
// This file is included to the build if any of the buildtags below // are defined. Refer to README notes for more details. //+build easyjson_nounsafe appengine package jlexer // bytesToStr creates a string normally from []byte // // Note that this method is roughly 1.5x slower than using the 'unsafe' method. func bytesToStr(data []byte) string { return string(data) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mailru/easyjson/buffer/pool.go
vendor/github.com/mailru/easyjson/buffer/pool.go
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer

import (
	"io"
	"net"
	"sync"
)

// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
	StartSize  int // Minimum chunk size that is allocated.
	PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
	MaxSize    int // Maximum chunk size that will be allocated.
}

var config = PoolConfig{
	StartSize:  128,
	PooledSize: 512,
	MaxSize:    32768,
}

// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}

// initBuffers creates one pool per power-of-two chunk size in
// [PooledSize, MaxSize].
func initBuffers() {
	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
		buffers[l] = new(sync.Pool)
	}
}

func init() {
	initBuffers()
}

// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
	config = cfg
	initBuffers()
}

// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
	size := cap(buf)
	if size < config.PooledSize {
		return
	}
	// Only the exact sizes created by initBuffers have a pool entry.
	if c := buffers[size]; c != nil {
		c.Put(buf[:0])
	}
}

// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
	if size >= config.PooledSize {
		if c := buffers[size]; c != nil {
			v := c.Get()
			if v != nil {
				return v.([]byte)
			}
		}
	}
	return make([]byte, 0, size)
}

// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {
	// Buf is the current chunk that can be used for serialization.
	Buf []byte

	toPool []byte   // chunk to hand back to the pool when no longer current
	bufs   [][]byte // previously filled chunks, in write order
}

// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
	if cap(b.Buf)-len(b.Buf) < s {
		b.ensureSpaceSlow(s)
	}
}

// ensureSpaceSlow archives the current chunk in bufs and allocates the next
// one; chunk sizes double per allocation, capped at config.MaxSize.
func (b *Buffer) ensureSpaceSlow(s int) {
	l := len(b.Buf)
	if l > 0 {
		if cap(b.toPool) != cap(b.Buf) {
			// Chunk was reallocated, toPool can be pooled.
			putBuf(b.toPool)
		}
		if cap(b.bufs) == 0 {
			b.bufs = make([][]byte, 0, 8)
		}
		b.bufs = append(b.bufs, b.Buf)
		l = cap(b.toPool) * 2
	} else {
		l = config.StartSize
	}

	if l > config.MaxSize {
		l = config.MaxSize
	}
	b.Buf = getBuf(l)
	b.toPool = b.Buf
}

// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
	b.EnsureSpace(1)
	b.Buf = append(b.Buf, data)
}

// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
	if len(data) <= cap(b.Buf)-len(b.Buf) {
		b.Buf = append(b.Buf, data...) // fast path
	} else {
		b.appendBytesSlow(data)
	}
}

// appendBytesSlow copies data chunk by chunk, growing the buffer as needed.
func (b *Buffer) appendBytesSlow(data []byte) {
	for len(data) > 0 {
		b.EnsureSpace(1)

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
	if len(data) <= cap(b.Buf)-len(b.Buf) {
		b.Buf = append(b.Buf, data...) // fast path
	} else {
		b.appendStringSlow(data)
	}
}

// appendStringSlow copies data chunk by chunk, growing the buffer as needed.
func (b *Buffer) appendStringSlow(data string) {
	for len(data) > 0 {
		b.EnsureSpace(1)

		sz := cap(b.Buf) - len(b.Buf)
		if sz > len(data) {
			sz = len(data)
		}

		b.Buf = append(b.Buf, data[:sz]...)
		data = data[sz:]
	}
}

// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
	size := len(b.Buf)
	for _, buf := range b.bufs {
		size += len(buf)
	}
	return size
}

// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
	bufs := net.Buffers(b.bufs)
	if len(b.Buf) > 0 {
		bufs = append(bufs, b.Buf)
	}
	n, err := bufs.WriteTo(w)

	// All chunks can be recycled now that their contents were written out.
	for _, buf := range b.bufs {
		putBuf(buf)
	}
	putBuf(b.toPool)

	b.bufs = nil
	b.Buf = nil
	b.toPool = nil

	return int(n), err
}

// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk. You can optionally provide one byte
// slice as argument that it will try to reuse.
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
	if len(b.bufs) == 0 {
		// Single-chunk case: hand the chunk itself to the caller,
		// detaching it from the pool so it is not recycled underneath them.
		ret := b.Buf
		b.toPool = nil
		b.Buf = nil
		return ret
	}

	var ret []byte
	size := b.Size()

	// If we got a buffer as argument and it is big enough, reuse it.
	if len(reuse) == 1 && cap(reuse[0]) >= size {
		ret = reuse[0][:0]
	} else {
		ret = make([]byte, 0, size)
	}
	for _, buf := range b.bufs {
		ret = append(ret, buf...)
		putBuf(buf)
	}

	ret = append(ret, b.Buf...)
	putBuf(b.toPool)

	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}

// readCloser streams the buffer's chunks, recycling each chunk as soon as it
// has been fully consumed.
type readCloser struct {
	offset int      // read position within bufs[0]
	bufs   [][]byte // remaining unread chunks
}

// Read implements io.Reader by draining the chunk list into p.
func (r *readCloser) Read(p []byte) (n int, err error) {
	for _, buf := range r.bufs {
		// Copy as much as we can.
		x := copy(p[n:], buf[r.offset:])
		n += x // Increment how much we filled.

		// Did we empty the whole buffer?
		if r.offset+x == len(buf) {
			// On to the next buffer.
			r.offset = 0
			r.bufs = r.bufs[1:]

			// We can release this buffer.
			putBuf(buf)
		} else {
			r.offset += x
		}

		if n == len(p) {
			break
		}
	}
	// No buffers left or nothing read?
	if len(r.bufs) == 0 {
		err = io.EOF
	}
	return
}

// Close implements io.Closer by releasing any chunks that were never read.
func (r *readCloser) Close() error {
	// Release all remaining buffers.
	for _, buf := range r.bufs {
		putBuf(buf)
	}
	// In case Close gets called multiple times.
	r.bufs = nil

	return nil
}

// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
func (b *Buffer) ReadCloser() io.ReadCloser {
	ret := &readCloser{0, append(b.bufs, b.Buf)}

	// Ownership of all chunks moves to the reader; reset the buffer.
	b.bufs = nil
	b.toPool = nil
	b.Buf = nil

	return ret
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/urn.go
vendor/github.com/leodido/go-urn/urn.go
package urn

import (
	"encoding/json"
	"fmt"
	"strings"
)

const errInvalidURN = "invalid URN: %s"

// URN represents an Uniform Resource Name.
//
// The general form represented is:
//
//	urn:<id>:<ss>
//
// Details at https://tools.ietf.org/html/rfc2141.
type URN struct {
	prefix     string // Static prefix. Equal to "urn" when empty.
	ID         string // Namespace identifier (NID)
	SS         string // Namespace specific string (NSS)
	norm       string // Normalized namespace specific string
	kind       Kind   // Which RFC grammar the URN was parsed against
	scim       *SCIM  // SCIM parts, set only for RFC 7643 URNs
	rComponent string // RFC8141
	qComponent string // RFC8141
	fComponent string // RFC8141
	rStart     bool   // RFC8141
	qStart     bool   // RFC8141
	tolower    []int  // Positions of chars the parser marked for lowercasing
}

// Normalize turns the receiving URN into its norm version.
//
// Which means: lowercase prefix, lowercase namespace identifier, and immutate namespace specific string chars (except <hex> tokens which are lowercased).
func (u *URN) Normalize() *URN {
	return &URN{
		prefix: "urn",
		ID:     strings.ToLower(u.ID),
		SS:     u.norm,
		// rComponent: u.rComponent,
		// qComponent: u.qComponent,
		// fComponent: u.fComponent,
	}
}

// Equal checks the lexical equivalence of the current URN with another one.
func (u *URN) Equal(x *URN) bool {
	if x == nil {
		return false
	}
	// Compare the normalized forms, per RFC 2141 lexical equivalence.
	nu := u.Normalize()
	nx := x.Normalize()

	return nu.prefix == nx.prefix && nu.ID == nx.ID && nu.SS == nx.SS
}

// String reassembles the URN into a valid URN string.
//
// This requires both ID and SS fields to be non-empty.
// Otherwise it returns an empty string.
//
// Default URN prefix is "urn".
func (u *URN) String() string {
	var res string
	if u.ID != "" && u.SS != "" {
		if u.prefix == "" {
			res += "urn"
		}
		res += u.prefix + ":" + u.ID + ":" + u.SS
		// Optional RFC 8141 components are re-attached with their markers.
		if u.rComponent != "" {
			res += "?+" + u.rComponent
		}
		if u.qComponent != "" {
			res += "?=" + u.qComponent
		}
		if u.fComponent != "" {
			res += "#" + u.fComponent
		}
	}

	return res
}

// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax (RFC 2141).
func Parse(u []byte, options ...Option) (*URN, bool) { urn, err := NewMachine(options...).Parse(u) if err != nil { return nil, false } return urn, true } // MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`). func (u URN) MarshalJSON() ([]byte, error) { return json.Marshal(u.String()) } // UnmarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`). func (u *URN) UnmarshalJSON(bytes []byte) error { var str string if err := json.Unmarshal(bytes, &str); err != nil { return err } if value, ok := Parse([]byte(str)); !ok { return fmt.Errorf(errInvalidURN, str) } else { *u = *value } return nil } func (u *URN) IsSCIM() bool { return u.kind == RFC7643 } func (u *URN) SCIM() *SCIM { if u.kind != RFC7643 { return nil } return u.scim } func (u *URN) RFC() Kind { return u.kind } func (u *URN) FComponent() string { return u.fComponent } func (u *URN) QComponent() string { return u.qComponent } func (u *URN) RComponent() string { return u.rComponent }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/scim.go
vendor/github.com/leodido/go-urn/scim.go
package urn

import (
	"encoding/json"
	"fmt"

	scimschema "github.com/leodido/go-urn/scim/schema"
)

const errInvalidSCIMURN = "invalid SCIM URN: %s"

// SCIM holds the decomposed parts of a SCIM URN (RFC 7643).
type SCIM struct {
	Type  scimschema.Type
	Name  string
	Other string
	pos   int
}

// MarshalJSON encodes the SCIM URN as a JSON string.
func (s SCIM) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.String())
}

// UnmarshalJSON decodes a JSON string into a SCIM URN, rejecting any input
// that does not parse as an RFC 7643 URN.
func (s *SCIM) UnmarshalJSON(bytes []byte) error {
	var str string
	if err := json.Unmarshal(bytes, &str); err != nil {
		return err
	}
	// Parse as SCIM
	value, ok := Parse([]byte(str), WithParsingMode(RFC7643Only))
	if !ok || value.RFC() != RFC7643 {
		return fmt.Errorf(errInvalidSCIMURN, str)
	}
	*s = *value.SCIM()
	return nil
}

// String reassembles the SCIM URN into its canonical string form.
func (s *SCIM) String() string {
	if s.Other == "" {
		return fmt.Sprintf("urn:ietf:params:scim:%s:%s", s.Type.String(), s.Name)
	}
	return fmt.Sprintf("urn:ietf:params:scim:%s:%s:%s", s.Type.String(), s.Name, s.Other)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/urn8141.go
vendor/github.com/leodido/go-urn/urn8141.go
package urn

import (
	"encoding/json"
	"fmt"
)

const errInvalidURN8141 = "invalid URN per RFC 8141: %s"

// URN8141 wraps a URN that must conform to RFC 8141.
type URN8141 struct {
	*URN
}

// MarshalJSON encodes the URN as a JSON string.
func (u URN8141) MarshalJSON() ([]byte, error) {
	return json.Marshal(u.String())
}

// UnmarshalJSON decodes a JSON string, accepting only RFC 8141 URNs.
func (u *URN8141) UnmarshalJSON(bytes []byte) error {
	var str string
	if err := json.Unmarshal(bytes, &str); err != nil {
		return err
	}
	parsed, ok := Parse([]byte(str), WithParsingMode(RFC8141Only))
	if !ok {
		return fmt.Errorf(errInvalidURN8141, str)
	}
	u.URN = parsed
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/kind.go
vendor/github.com/leodido/go-urn/kind.go
package urn

// Kind identifies which RFC grammar a URN was parsed against.
type Kind int

const (
	// NONE means the kind has not been determined.
	NONE Kind = iota
	// RFC2141 identifies a classic URN (urn:<id>:<ss>).
	RFC2141
	// RFC7643 identifies a SCIM URN.
	RFC7643
	// RFC8141 identifies a URN with optional r-/q-/f-components.
	RFC8141
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/options.go
vendor/github.com/leodido/go-urn/options.go
package urn

// Option configures a Machine before parsing.
type Option func(Machine)

// WithParsingMode returns an Option that sets the machine's parsing mode.
func WithParsingMode(mode ParsingMode) Option {
	return func(m Machine) {
		m.WithParsingMode(mode)
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/parsing_mode.go
vendor/github.com/leodido/go-urn/parsing_mode.go
package urn

// ParsingMode selects which URN grammar(s) the parser accepts.
type ParsingMode int

const (
	// Default is the zero value; machines fall back to DefaultParsingMode.
	Default ParsingMode = iota
	// RFC2141Only accepts only RFC 2141 URNs.
	RFC2141Only
	// RFC7643Only accepts only SCIM (RFC 7643) URNs.
	RFC7643Only
	// RFC8141Only accepts only RFC 8141 URNs.
	RFC8141Only
)

// DefaultParsingMode is the mode used when none is explicitly set.
const DefaultParsingMode = RFC2141Only
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/machine.go
vendor/github.com/leodido/go-urn/machine.go
package urn import ( "fmt" scimschema "github.com/leodido/go-urn/scim/schema" ) var ( errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]" errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]" errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]" errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]" errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]" errSCIMOther = "expecting a well-formed other SCIM part [col %d]" errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]" err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]" err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] not in first position) chars [col %d]" err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]" err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]" err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]" err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]" err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] 
but not at its beginning) [col %d]" ) var _toStateActions []byte = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } var _eofActions []byte = []byte{ 0, 1, 1, 1, 1, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, 9, 9, 4, 4, 11, 1, 1, 1, 1, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 14, 14, 14, 14, 16, 18, 20, 20, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 1, 1, 1, 1, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 24, 24, 25, 25, 0, 26, 28, 28, 29, 29, 30, 30, 26, 26, 31, 31, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 21, 21, 22, 22, 22, 34, 34, 35, 37, 37, 38, 40, 41, 41, 38, 42, 42, 42, 44, 42, 48, 48, 48, 50, 44, 50, 0, } const start int = 1 const firstFinal int = 172 const enScimOnly int = 44 const enRfc8141Only int = 83 const enFail int = 193 const enMain int = 1 // Machine is the interface representing the FSM type Machine interface { Error() error Parse(input []byte) (*URN, error) WithParsingMode(ParsingMode) } type machine struct { data []byte cs int p, pe, eof, pb int err error startParsingAt int parsingMode ParsingMode parsingModeSet bool } // NewMachine creates a new FSM able to parse RFC 2141 strings. 
func NewMachine(options ...Option) Machine { m := &machine{ parsingModeSet: false, } for _, o := range options { o(m) } // Set default parsing mode if !m.parsingModeSet { m.WithParsingMode(DefaultParsingMode) } return m } // Err returns the error that occurred on the last call to Parse. // // If the result is nil, then the line was parsed successfully. func (m *machine) Error() error { return m.err } func (m *machine) text() []byte { return m.data[m.pb:m.p] } // Parse parses the input byte array as a RFC 2141 or RFC7643 string. func (m *machine) Parse(input []byte) (*URN, error) { m.data = input m.p = 0 m.pb = 0 m.pe = len(input) m.eof = len(input) m.err = nil m.cs = m.startParsingAt output := &URN{ tolower: []int{}, } { if (m.p) == (m.pe) { goto _testEof } if m.cs == 0 { goto _out } _resume: switch m.cs { case 1: switch (m.data)[(m.p)] { case 85: goto tr1 case 117: goto tr1 } goto tr0 case 0: goto _out case 2: switch (m.data)[(m.p)] { case 82: goto tr2 case 114: goto tr2 } goto tr0 case 3: switch (m.data)[(m.p)] { case 78: goto tr3 case 110: goto tr3 } goto tr0 case 4: if (m.data)[(m.p)] == 58 { goto tr4 } goto tr0 case 5: switch (m.data)[(m.p)] { case 85: goto tr7 case 117: goto tr7 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr6 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr6 } default: goto tr6 } goto tr5 case 6: switch (m.data)[(m.p)] { case 45: goto tr9 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr9 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr9 } default: goto tr9 } goto tr8 case 7: switch (m.data)[(m.p)] { case 45: goto tr11 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr11 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr11 } default: goto tr11 
} goto tr8 case 8: switch (m.data)[(m.p)] { case 45: goto tr12 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr12 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr12 } default: goto tr12 } goto tr8 case 9: switch (m.data)[(m.p)] { case 45: goto tr13 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr13 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr13 } default: goto tr13 } goto tr8 case 10: switch (m.data)[(m.p)] { case 45: goto tr14 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr14 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr14 } default: goto tr14 } goto tr8 case 11: switch (m.data)[(m.p)] { case 45: goto tr15 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr15 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr15 } default: goto tr15 } goto tr8 case 12: switch (m.data)[(m.p)] { case 45: goto tr16 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr16 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr16 } default: goto tr16 } goto tr8 case 13: switch (m.data)[(m.p)] { case 45: goto tr17 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr17 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr17 } default: goto tr17 } goto tr8 case 14: switch (m.data)[(m.p)] { case 45: goto tr18 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr18 } case (m.data)[(m.p)] > 90: if 97 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr18 } default: goto tr18 } goto tr8 case 15: switch (m.data)[(m.p)] { case 45: goto tr19 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr19 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr19 } default: goto tr19 } goto tr8 case 16: switch (m.data)[(m.p)] { case 45: goto tr20 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr20 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr20 } default: goto tr20 } goto tr8 case 17: switch (m.data)[(m.p)] { case 45: goto tr21 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr21 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr21 } default: goto tr21 } goto tr8 case 18: switch (m.data)[(m.p)] { case 45: goto tr22 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr22 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr22 } default: goto tr22 } goto tr8 case 19: switch (m.data)[(m.p)] { case 45: goto tr23 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr23 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr23 } default: goto tr23 } goto tr8 case 20: switch (m.data)[(m.p)] { case 45: goto tr24 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr24 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr24 } default: goto tr24 } goto tr8 case 21: switch (m.data)[(m.p)] { case 45: goto tr25 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 57 { goto tr25 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr25 } default: goto tr25 } goto tr8 case 22: switch (m.data)[(m.p)] { case 45: goto tr26 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr26 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr26 } default: goto tr26 } goto tr8 case 23: switch (m.data)[(m.p)] { case 45: goto tr27 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr27 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr27 } default: goto tr27 } goto tr8 case 24: switch (m.data)[(m.p)] { case 45: goto tr28 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr28 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr28 } default: goto tr28 } goto tr8 case 25: switch (m.data)[(m.p)] { case 45: goto tr29 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr29 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr29 } default: goto tr29 } goto tr8 case 26: switch (m.data)[(m.p)] { case 45: goto tr30 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr30 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr30 } default: goto tr30 } goto tr8 case 27: switch (m.data)[(m.p)] { case 45: goto tr31 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr31 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr31 } default: goto tr31 } goto tr8 case 28: switch (m.data)[(m.p)] { case 45: goto tr32 case 58: 
goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr32 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr32 } default: goto tr32 } goto tr8 case 29: switch (m.data)[(m.p)] { case 45: goto tr33 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr33 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr33 } default: goto tr33 } goto tr8 case 30: switch (m.data)[(m.p)] { case 45: goto tr34 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr34 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr34 } default: goto tr34 } goto tr8 case 31: switch (m.data)[(m.p)] { case 45: goto tr35 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr35 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr35 } default: goto tr35 } goto tr8 case 32: switch (m.data)[(m.p)] { case 45: goto tr36 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr36 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr36 } default: goto tr36 } goto tr8 case 33: switch (m.data)[(m.p)] { case 45: goto tr37 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr37 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr37 } default: goto tr37 } goto tr8 case 34: switch (m.data)[(m.p)] { case 45: goto tr38 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr38 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr38 } default: goto tr38 
} goto tr8 case 35: switch (m.data)[(m.p)] { case 45: goto tr39 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr39 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr39 } default: goto tr39 } goto tr8 case 36: switch (m.data)[(m.p)] { case 45: goto tr40 case 58: goto tr10 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr40 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr40 } default: goto tr40 } goto tr8 case 37: if (m.data)[(m.p)] == 58 { goto tr10 } goto tr8 case 38: switch (m.data)[(m.p)] { case 33: goto tr42 case 36: goto tr42 case 37: goto tr43 case 61: goto tr42 case 95: goto tr42 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr42 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr42 } case (m.data)[(m.p)] >= 64: goto tr42 } default: goto tr42 } goto tr41 case 172: switch (m.data)[(m.p)] { case 33: goto tr212 case 36: goto tr212 case 37: goto tr213 case 61: goto tr212 case 95: goto tr212 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr212 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr212 } case (m.data)[(m.p)] >= 64: goto tr212 } default: goto tr212 } goto tr41 case 39: switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr45 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr45 } default: goto tr46 } goto tr44 case 40: switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr47 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr47 } default: goto tr48 } goto tr44 case 
173: switch (m.data)[(m.p)] { case 33: goto tr212 case 36: goto tr212 case 37: goto tr213 case 61: goto tr212 case 95: goto tr212 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr212 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr212 } case (m.data)[(m.p)] >= 64: goto tr212 } default: goto tr212 } goto tr44 case 41: switch (m.data)[(m.p)] { case 45: goto tr9 case 58: goto tr10 case 82: goto tr49 case 114: goto tr49 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr9 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr9 } default: goto tr9 } goto tr5 case 42: switch (m.data)[(m.p)] { case 45: goto tr11 case 58: goto tr10 case 78: goto tr50 case 110: goto tr50 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr11 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr11 } default: goto tr11 } goto tr5 case 43: if (m.data)[(m.p)] == 45 { goto tr12 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr12 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr12 } default: goto tr12 } goto tr51 case 44: switch (m.data)[(m.p)] { case 85: goto tr52 case 117: goto tr52 } goto tr0 case 45: switch (m.data)[(m.p)] { case 82: goto tr53 case 114: goto tr53 } goto tr0 case 46: switch (m.data)[(m.p)] { case 78: goto tr54 case 110: goto tr54 } goto tr0 case 47: if (m.data)[(m.p)] == 58 { goto tr55 } goto tr0 case 48: if (m.data)[(m.p)] == 105 { goto tr57 } goto tr56 case 49: if (m.data)[(m.p)] == 101 { goto tr58 } goto tr56 case 50: if (m.data)[(m.p)] == 116 { goto tr59 } goto tr56 case 51: if (m.data)[(m.p)] == 102 { goto tr60 } goto tr56 case 52: if (m.data)[(m.p)] == 58 { goto tr61 } goto tr56 case 53: if 
(m.data)[(m.p)] == 112 { goto tr62 } goto tr56 case 54: if (m.data)[(m.p)] == 97 { goto tr63 } goto tr56 case 55: if (m.data)[(m.p)] == 114 { goto tr64 } goto tr56 case 56: if (m.data)[(m.p)] == 97 { goto tr65 } goto tr56 case 57: if (m.data)[(m.p)] == 109 { goto tr66 } goto tr56 case 58: if (m.data)[(m.p)] == 115 { goto tr67 } goto tr56 case 59: if (m.data)[(m.p)] == 58 { goto tr68 } goto tr56 case 60: if (m.data)[(m.p)] == 115 { goto tr69 } goto tr56 case 61: if (m.data)[(m.p)] == 99 { goto tr70 } goto tr56 case 62: if (m.data)[(m.p)] == 105 { goto tr71 } goto tr56 case 63: if (m.data)[(m.p)] == 109 { goto tr72 } goto tr56 case 64: if (m.data)[(m.p)] == 58 { goto tr73 } goto tr56 case 65: switch (m.data)[(m.p)] { case 97: goto tr75 case 112: goto tr76 case 115: goto tr77 } goto tr74 case 66: if (m.data)[(m.p)] == 112 { goto tr78 } goto tr74 case 67: if (m.data)[(m.p)] == 105 { goto tr79 } goto tr74 case 68: if (m.data)[(m.p)] == 58 { goto tr80 } goto tr74 case 69: switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr82 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr82 } default: goto tr82 } goto tr81 case 174: if (m.data)[(m.p)] == 58 { goto tr215 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr214 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr214 } default: goto tr214 } goto tr81 case 70: switch (m.data)[(m.p)] { case 33: goto tr84 case 36: goto tr84 case 37: goto tr85 case 61: goto tr84 case 95: goto tr84 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr84 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr84 } case (m.data)[(m.p)] >= 64: goto tr84 } default: goto tr84 } goto tr83 case 175: switch (m.data)[(m.p)] { case 33: goto tr216 case 36: goto tr216 case 37: goto 
tr217 case 61: goto tr216 case 95: goto tr216 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr216 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr216 } case (m.data)[(m.p)] >= 64: goto tr216 } default: goto tr216 } goto tr83 case 71: switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr87 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr87 } default: goto tr88 } goto tr86 case 72: switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr89 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr89 } default: goto tr90 } goto tr86 case 176: switch (m.data)[(m.p)] { case 33: goto tr216 case 36: goto tr216 case 37: goto tr217 case 61: goto tr216 case 95: goto tr216 } switch { case (m.data)[(m.p)] < 48: if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { goto tr216 } case (m.data)[(m.p)] > 59: switch { case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr216 } case (m.data)[(m.p)] >= 64: goto tr216 } default: goto tr216 } goto tr86 case 73: if (m.data)[(m.p)] == 97 { goto tr91 } goto tr74 case 74: if (m.data)[(m.p)] == 114 { goto tr92 } goto tr74 case 75: if (m.data)[(m.p)] == 97 { goto tr93 } goto tr74 case 76: if (m.data)[(m.p)] == 109 { goto tr79 } goto tr74 case 77: if (m.data)[(m.p)] == 99 { goto tr94 } goto tr74 case 78: if (m.data)[(m.p)] == 104 { goto tr95 } goto tr74 case 79: if (m.data)[(m.p)] == 101 { goto tr96 } goto tr74 case 80: if (m.data)[(m.p)] == 109 { goto tr97 } goto tr74 case 81: if (m.data)[(m.p)] == 97 { goto tr98 } goto tr74 case 82: if (m.data)[(m.p)] == 115 { goto tr79 } goto tr74 case 83: switch (m.data)[(m.p)] { case 85: goto tr99 case 117: goto tr99 } goto tr0 case 84: switch (m.data)[(m.p)] { case 82: goto tr100 case 114: goto tr100 
} goto tr0 case 85: switch (m.data)[(m.p)] { case 78: goto tr101 case 110: goto tr101 } goto tr0 case 86: if (m.data)[(m.p)] == 58 { goto tr102 } goto tr0 case 87: switch (m.data)[(m.p)] { case 85: goto tr105 case 117: goto tr105 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr104 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr104 } default: goto tr104 } goto tr103 case 88: if (m.data)[(m.p)] == 45 { goto tr107 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr108 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr108 } default: goto tr108 } goto tr106 case 89: if (m.data)[(m.p)] == 45 { goto tr109 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr110 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr110 } default: goto tr110 } goto tr106 case 90: if (m.data)[(m.p)] == 45 { goto tr111 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr112 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr112 } default: goto tr112 } goto tr106 case 91: if (m.data)[(m.p)] == 45 { goto tr113 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr114 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr114 } default: goto tr114 } goto tr106 case 92: if (m.data)[(m.p)] == 45 { goto tr115 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr116 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr116 } default: goto tr116 } goto tr106 case 93: if (m.data)[(m.p)] == 45 { goto tr117 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr118 } case 
(m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr118 } default: goto tr118 } goto tr106 case 94: if (m.data)[(m.p)] == 45 { goto tr119 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr120 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr120 } default: goto tr120 } goto tr106 case 95: if (m.data)[(m.p)] == 45 { goto tr121 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr122 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr122 } default: goto tr122 } goto tr106 case 96: if (m.data)[(m.p)] == 45 { goto tr123 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr124 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr124 } default: goto tr124 } goto tr106 case 97: if (m.data)[(m.p)] == 45 { goto tr125 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr126 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr126 } default: goto tr126 } goto tr106 case 98: if (m.data)[(m.p)] == 45 { goto tr127 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr128 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr128 } default: goto tr128 } goto tr106 case 99: if (m.data)[(m.p)] == 45 { goto tr129 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr130 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr130 } default: goto tr130 } goto tr106 case 100: if (m.data)[(m.p)] == 45 { goto tr131 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr132 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] 
<= 122 { goto tr132 } default: goto tr132 } goto tr106 case 101: if (m.data)[(m.p)] == 45 { goto tr133 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr134 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr134 } default: goto tr134 } goto tr106 case 102: if (m.data)[(m.p)] == 45 { goto tr135 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr136 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr136 } default: goto tr136 } goto tr106 case 103: if (m.data)[(m.p)] == 45 { goto tr137 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr138 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr138 } default: goto tr138 } goto tr106 case 104: if (m.data)[(m.p)] == 45 { goto tr139 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr140 } case (m.data)[(m.p)] > 90: if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { goto tr140 } default: goto tr140 } goto tr106 case 105: if (m.data)[(m.p)] == 45 { goto tr141 } switch { case (m.data)[(m.p)] < 65: if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr142 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/leodido/go-urn/scim/schema/type.go
vendor/github.com/leodido/go-urn/scim/schema/type.go
package scimschema type Type int const ( Unsupported Type = iota Schemas API Param ) func (t Type) String() string { switch t { case Schemas: return "schemas" case API: return "api" case Param: return "param" } return "" } func TypeFromString(input string) Type { switch input { case "schemas": return Schemas case "api": return API case "param": return Param } return Unsupported }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go
vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/register.go
package k8scnicncfio

const (
	// GroupName is the API group name for the k8s.cni.cncf.io types.
	GroupName = "k8s.cni.cncf.io"
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go
vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/zz_generated.deepcopy.go
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2021 The Kubernetes Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.
// NOTE(review): regenerate with deepcopy-gen instead of hand-editing.

package v1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeviceInfo) DeepCopyInto(out *DeviceInfo) {
	*out = *in
	// Each non-nil pointer field gets a freshly allocated value copy, so
	// the clone shares no pointer targets with the receiver.
	if in.Pci != nil {
		in, out := &in.Pci, &out.Pci
		*out = new(PciDevice)
		**out = **in
	}
	if in.Vdpa != nil {
		in, out := &in.Vdpa, &out.Vdpa
		*out = new(VdpaDevice)
		**out = **in
	}
	if in.VhostUser != nil {
		in, out := &in.VhostUser, &out.VhostUser
		*out = new(VhostDevice)
		**out = **in
	}
	if in.Memif != nil {
		in, out := &in.Memif, &out.Memif
		*out = new(MemifDevice)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceInfo.
func (in *DeviceInfo) DeepCopy() *DeviceInfo {
	if in == nil {
		return nil
	}
	out := new(DeviceInfo)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemifDevice) DeepCopyInto(out *MemifDevice) {
	// Plain struct assignment suffices: no pointer/slice/map fields to clone.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemifDevice.
func (in *MemifDevice) DeepCopy() *MemifDevice {
	if in == nil {
		return nil
	}
	out := new(MemifDevice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkAttachmentDefinition) DeepCopyInto(out *NetworkAttachmentDefinition) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta holds reference types (labels, annotations, ...), so it
	// is cloned via its own generated DeepCopyInto.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinition.
func (in *NetworkAttachmentDefinition) DeepCopy() *NetworkAttachmentDefinition {
	if in == nil {
		return nil
	}
	out := new(NetworkAttachmentDefinition)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkAttachmentDefinition) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkAttachmentDefinitionList) DeepCopyInto(out *NetworkAttachmentDefinitionList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Items is a slice: allocate a new backing array and deep-copy each
	// element so the clone is fully independent.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]NetworkAttachmentDefinition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinitionList.
func (in *NetworkAttachmentDefinitionList) DeepCopy() *NetworkAttachmentDefinitionList {
	if in == nil {
		return nil
	}
	out := new(NetworkAttachmentDefinitionList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NetworkAttachmentDefinitionList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkAttachmentDefinitionSpec) DeepCopyInto(out *NetworkAttachmentDefinitionSpec) {
	// Plain struct assignment suffices: no pointer/slice/map fields to clone.
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkAttachmentDefinitionSpec.
func (in *NetworkAttachmentDefinitionSpec) DeepCopy() *NetworkAttachmentDefinitionSpec {
	if in == nil {
		return nil
	}
	out := new(NetworkAttachmentDefinitionSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PciDevice) DeepCopyInto(out *PciDevice) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PciDevice.
func (in *PciDevice) DeepCopy() *PciDevice {
	if in == nil {
		return nil
	}
	out := new(PciDevice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VdpaDevice) DeepCopyInto(out *VdpaDevice) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VdpaDevice.
func (in *VdpaDevice) DeepCopy() *VdpaDevice {
	if in == nil {
		return nil
	}
	out := new(VdpaDevice)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VhostDevice) DeepCopyInto(out *VhostDevice) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VhostDevice.
func (in *VhostDevice) DeepCopy() *VhostDevice {
	if in == nil {
		return nil
	}
	out := new(VhostDevice)
	in.DeepCopyInto(out)
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go
vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/types.go
package v1

import (
	"encoding/json"
	"errors"
	"net"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NetworkAttachmentDefinition is the CRD object that carries a CNI network
// configuration for pods to attach to.
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resourceName=network-attachment-definitions
type NetworkAttachmentDefinition struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec NetworkAttachmentDefinitionSpec `json:"spec"`
}

// NetworkAttachmentDefinitionSpec holds the raw CNI configuration as a JSON
// string.
type NetworkAttachmentDefinitionSpec struct {
	Config string `json:"config"`
}

// NetworkAttachmentDefinitionList is a list of NetworkAttachmentDefinition
// objects.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type NetworkAttachmentDefinitionList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`

	Items []NetworkAttachmentDefinition `json:"items"`
}

// DNS contains values interesting for DNS resolvers
// +k8s:deepcopy-gen=false
type DNS struct {
	Nameservers []string `json:"nameservers,omitempty"`
	Domain      string   `json:"domain,omitempty"`
	Search      []string `json:"search,omitempty"`
	Options     []string `json:"options,omitempty"`
}

// Well-known DeviceInfo type discriminators and the schema version they
// conform to.
const (
	DeviceInfoTypePCI       = "pci"
	DeviceInfoTypeVHostUser = "vhost-user"
	DeviceInfoTypeMemif     = "memif"
	DeviceInfoTypeVDPA      = "vdpa"
	DeviceInfoVersion       = "1.1.0"
)

// DeviceInfo contains the information of the device associated
// with this network (if any)
type DeviceInfo struct {
	Type      string       `json:"type,omitempty"`
	Version   string       `json:"version,omitempty"`
	Pci       *PciDevice   `json:"pci,omitempty"`
	Vdpa      *VdpaDevice  `json:"vdpa,omitempty"`
	VhostUser *VhostDevice `json:"vhost-user,omitempty"`
	Memif     *MemifDevice `json:"memif,omitempty"`
}

// PciDevice describes a PCI device backing a network attachment.
type PciDevice struct {
	PciAddress        string `json:"pci-address,omitempty"`
	Vhostnet          string `json:"vhost-net,omitempty"`
	RdmaDevice        string `json:"rdma-device,omitempty"`
	PfPciAddress      string `json:"pf-pci-address,omitempty"`
	RepresentorDevice string `json:"representor-device,omitempty"`
}

// VdpaDevice describes a vDPA device backing a network attachment.
type VdpaDevice struct {
	ParentDevice      string `json:"parent-device,omitempty"`
	Driver            string `json:"driver,omitempty"`
	Path              string `json:"path,omitempty"`
	PciAddress        string `json:"pci-address,omitempty"`
	PfPciAddress      string `json:"pf-pci-address,omitempty"`
	RepresentorDevice string `json:"representor-device,omitempty"`
}

// Valid vhost-user connection modes.
const (
	VhostDeviceModeClient = "client"
	VhostDeviceModeServer = "server"
)

// VhostDevice describes a vhost-user socket backing a network attachment.
type VhostDevice struct {
	Mode string `json:"mode,omitempty"`
	Path string `json:"path,omitempty"`
}

// Valid memif roles and modes.
// NOTE(review): the "Memit" spellings below reproduce upstream's exported
// names; renaming them would break the public API.
const (
	MemifDeviceRoleMaster   = "master"
	MemitDeviceRoleSlave    = "slave"
	MemifDeviceModeEthernet = "ethernet"
	MemitDeviceModeIP       = "ip"
	MemitDeviceModePunt     = "punt"
)

// MemifDevice describes a memif socket backing a network attachment.
type MemifDevice struct {
	Role string `json:"role,omitempty"`
	Path string `json:"path,omitempty"`
	Mode string `json:"mode,omitempty"`
}

// NetworkStatus is for network status annotation for pod
// +k8s:deepcopy-gen=false
type NetworkStatus struct {
	Name       string      `json:"name"`
	Interface  string      `json:"interface,omitempty"`
	IPs        []string    `json:"ips,omitempty"`
	Mac        string      `json:"mac,omitempty"`
	Mtu        int         `json:"mtu,omitempty"`
	Default    bool        `json:"default,omitempty"`
	DNS        DNS         `json:"dns,omitempty"`
	DeviceInfo *DeviceInfo `json:"device-info,omitempty"`
	Gateway    []string    `json:"gateway,omitempty"`
}

// PortMapEntry for CNI PortMapEntry
// +k8s:deepcopy-gen=false
type PortMapEntry struct {
	HostPort      int    `json:"hostPort"`
	ContainerPort int    `json:"containerPort"`
	Protocol      string `json:"protocol,omitempty"`
	HostIP        string `json:"hostIP,omitempty"`
}

// BandwidthEntry for CNI BandwidthEntry
// +k8s:deepcopy-gen=false
type BandwidthEntry struct {
	IngressRate  int `json:"ingressRate"`
	IngressBurst int `json:"ingressBurst"`

	EgressRate  int `json:"egressRate"`
	EgressBurst int `json:"egressBurst"`
}

// NetworkSelectionElement represents one element of the JSON format
// Network Attachment Selection Annotation as described in section 4.1.2
// of the CRD specification.
// +k8s:deepcopy-gen=false
type NetworkSelectionElement struct {
	// Name contains the name of the Network object this element selects
	Name string `json:"name"`
	// Namespace contains the optional namespace that the network referenced
	// by Name exists in
	Namespace string `json:"namespace,omitempty"`
	// IPRequest contains an optional requested IP addresses for this network
	// attachment
	IPRequest []string `json:"ips,omitempty"`
	// MacRequest contains an optional requested MAC address for this
	// network attachment
	MacRequest string `json:"mac,omitempty"`
	// InfinibandGUIDRequest contains an optional requested Infiniband GUID
	// address for this network attachment
	InfinibandGUIDRequest string `json:"infiniband-guid,omitempty"`
	// InterfaceRequest contains an optional requested name for the
	// network interface this attachment will create in the container
	InterfaceRequest string `json:"interface,omitempty"`
	// PortMappingsRequest contains an optional requested port mapping
	// for the network
	PortMappingsRequest []*PortMapEntry `json:"portMappings,omitempty"`
	// BandwidthRequest contains an optional requested bandwidth for
	// the network
	BandwidthRequest *BandwidthEntry `json:"bandwidth,omitempty"`
	// CNIArgs contains additional CNI arguments for the network interface
	CNIArgs *map[string]interface{} `json:"cni-args,omitempty"`
	// GatewayRequest contains default route IP address for the pod
	GatewayRequest []net.IP `json:"default-route,omitempty"`
	// IPAMClaimReference container the IPAMClaim name where the IPs for this
	// attachment will be located.
	IPAMClaimReference string `json:"ipam-claim-reference,omitempty"`
}

// UnmarshalJSON decodes a NetworkSelectionElement and rejects elements that
// request both static IPs and an IPAM claim reference, which are mutually
// exclusive IP sources.
func (nse *NetworkSelectionElement) UnmarshalJSON(b []byte) error {
	// The alias type drops the UnmarshalJSON method so decoding below does
	// not recurse into this function.
	type networkSelectionElement NetworkSelectionElement

	var decoded networkSelectionElement
	if err := json.Unmarshal(b, &decoded); err != nil {
		return err
	}
	if len(decoded.IPRequest) > 0 && decoded.IPAMClaimReference != "" {
		return TooManyIPSources
	}

	*nse = NetworkSelectionElement(decoded)
	return nil
}

const (
	// Pod annotation for network-attachment-definition
	NetworkAttachmentAnnot = "k8s.v1.cni.cncf.io/networks"
	// Pod annotation for network status
	NetworkStatusAnnot = "k8s.v1.cni.cncf.io/network-status"
)

// NoK8sNetworkError indicates error, no network in kubernetes
// +k8s:deepcopy-gen=false
type NoK8sNetworkError struct {
	Message string
}

// Error implements the error interface.
func (e *NoK8sNetworkError) Error() string { return e.Message }

// TooManyIPSources is returned when a network selection element specifies
// both static IPs and an IPAM claim reference.
var TooManyIPSources = errors.New("cannot provide a static IP and a reference of an IPAM claim in the same network selection element")
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go
vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/register.go
package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8scnicncfio "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io" ) // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: k8scnicncfio.GroupName, Version: "v1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } var ( // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. SchemeBuilder runtime.SchemeBuilder localSchemeBuilder = &SchemeBuilder AddToScheme = localSchemeBuilder.AddToScheme ) func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. localSchemeBuilder.Register(addKnownTypes) } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NetworkAttachmentDefinition{}, &NetworkAttachmentDefinitionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go
vendor/github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1/doc.go
// Package v1 contains the v1 API types for the k8s.cni.cncf.io group.
//
// +k8s:deepcopy-gen=package,register
// +groupName=k8s.cni.cncf.io
// +groupGoName=K8sCniCncfIo
package v1
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go
vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go
// // Written by Maxim Khitrov (November 2012) // // Package flowrate provides the tools for monitoring and limiting the flow rate // of an arbitrary data stream. package flowrate import ( "math" "sync" "time" ) // Monitor monitors and limits the transfer rate of a data stream. type Monitor struct { mu sync.Mutex // Mutex guarding access to all internal fields active bool // Flag indicating an active transfer start time.Duration // Transfer start time (clock() value) bytes int64 // Total number of bytes transferred samples int64 // Total number of samples taken rSample float64 // Most recent transfer rate sample (bytes per second) rEMA float64 // Exponential moving average of rSample rPeak float64 // Peak transfer rate (max of all rSamples) rWindow float64 // rEMA window (seconds) sBytes int64 // Number of bytes transferred since sLast sLast time.Duration // Most recent sample time (stop time when inactive) sRate time.Duration // Sampling rate tBytes int64 // Number of bytes expected in the current transfer tLast time.Duration // Time of the most recent transfer of at least 1 byte } // New creates a new flow control monitor. Instantaneous transfer rate is // measured and updated for each sampleRate interval. windowSize determines the // weight of each sample in the exponential moving average (EMA) calculation. // The exact formulas are: // // sampleTime = currentTime - prevSampleTime // sampleRate = byteCount / sampleTime // weight = 1 - exp(-sampleTime/windowSize) // newRate = weight*sampleRate + (1-weight)*oldRate // // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, // respectively. 
func New(sampleRate, windowSize time.Duration) *Monitor { if sampleRate = clockRound(sampleRate); sampleRate <= 0 { sampleRate = 5 * clockRate } if windowSize <= 0 { windowSize = 1 * time.Second } now := clock() return &Monitor{ active: true, start: now, rWindow: windowSize.Seconds(), sLast: now, sRate: sampleRate, tLast: now, } } // Update records the transfer of n bytes and returns n. It should be called // after each Read/Write operation, even if n is 0. func (m *Monitor) Update(n int) int { m.mu.Lock() m.update(n) m.mu.Unlock() return n } // IO is a convenience method intended to wrap io.Reader and io.Writer method // execution. It calls m.Update(n) and then returns (n, err) unmodified. func (m *Monitor) IO(n int, err error) (int, error) { return m.Update(n), err } // Done marks the transfer as finished and prevents any further updates or // limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and // Limit methods become NOOPs. It returns the total number of bytes transferred. func (m *Monitor) Done() int64 { m.mu.Lock() if now := m.update(0); m.sBytes > 0 { m.reset(now) } m.active = false m.tLast = 0 n := m.bytes m.mu.Unlock() return n } // timeRemLimit is the maximum Status.TimeRem value. const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second // Status represents the current Monitor status. All transfer rates are in bytes // per second rounded to the nearest byte. 
type Status struct { Active bool // Flag indicating an active transfer Start time.Time // Transfer start time Duration time.Duration // Time period covered by the statistics Idle time.Duration // Time since the last transfer of at least 1 byte Bytes int64 // Total number of bytes transferred Samples int64 // Total number of samples taken InstRate int64 // Instantaneous transfer rate CurRate int64 // Current transfer rate (EMA of InstRate) AvgRate int64 // Average transfer rate (Bytes / Duration) PeakRate int64 // Maximum instantaneous transfer rate BytesRem int64 // Number of bytes remaining in the transfer TimeRem time.Duration // Estimated time to completion Progress Percent // Overall transfer progress } // Status returns current transfer status information. The returned value // becomes static after a call to Done. func (m *Monitor) Status() Status { m.mu.Lock() now := m.update(0) s := Status{ Active: m.active, Start: clockToTime(m.start), Duration: m.sLast - m.start, Idle: now - m.tLast, Bytes: m.bytes, Samples: m.samples, PeakRate: round(m.rPeak), BytesRem: m.tBytes - m.bytes, Progress: percentOf(float64(m.bytes), float64(m.tBytes)), } if s.BytesRem < 0 { s.BytesRem = 0 } if s.Duration > 0 { rAvg := float64(s.Bytes) / s.Duration.Seconds() s.AvgRate = round(rAvg) if s.Active { s.InstRate = round(m.rSample) s.CurRate = round(m.rEMA) if s.BytesRem > 0 { if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { ns := float64(s.BytesRem) / tRate * 1e9 if ns > float64(timeRemLimit) { ns = float64(timeRemLimit) } s.TimeRem = clockRound(time.Duration(ns)) } } } } m.mu.Unlock() return s } // Limit restricts the instantaneous (per-sample) data flow to rate bytes per // second. It returns the maximum number of bytes (0 <= n <= want) that may be // transferred immediately without exceeding the limit. If block == true, the // call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, // or the transfer is inactive (after a call to Done). 
// // At least one byte is always allowed to be transferred in any given sampling // period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate // is 10 bytes per second. // // For usage examples, see the implementation of Reader and Writer in io.go. func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { if want < 1 || rate < 1 { return want } m.mu.Lock() // Determine the maximum number of bytes that can be sent in one sample limit := round(float64(rate) * m.sRate.Seconds()) if limit <= 0 { limit = 1 } // If block == true, wait until m.sBytes < limit if now := m.update(0); block { for m.sBytes >= limit && m.active { now = m.waitNextSample(now) } } // Make limit <= want (unlimited if the transfer is no longer active) if limit -= m.sBytes; limit > int64(want) || !m.active { limit = int64(want) } m.mu.Unlock() if limit < 0 { limit = 0 } return int(limit) } // SetTransferSize specifies the total size of the data transfer, which allows // the Monitor to calculate the overall progress and time to completion. func (m *Monitor) SetTransferSize(bytes int64) { if bytes < 0 { bytes = 0 } m.mu.Lock() m.tBytes = bytes m.mu.Unlock() } // update accumulates the transferred byte count for the current sample until // clock() - m.sLast >= m.sRate. The monitor status is updated once the current // sample is done. func (m *Monitor) update(n int) (now time.Duration) { if !m.active { return } if now = clock(); n > 0 { m.tLast = now } m.sBytes += int64(n) if sTime := now - m.sLast; sTime >= m.sRate { t := sTime.Seconds() if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { m.rPeak = m.rSample } // Exponential moving average using a method similar to *nix load // average calculation. Longer sampling periods carry greater weight. if m.samples > 0 { w := math.Exp(-t / m.rWindow) m.rEMA = m.rSample + w*(m.rEMA-m.rSample) } else { m.rEMA = m.rSample } m.reset(now) } return } // reset clears the current sample state in preparation for the next sample. 
func (m *Monitor) reset(sampleTime time.Duration) { m.bytes += m.sBytes m.samples++ m.sBytes = 0 m.sLast = sampleTime } // waitNextSample sleeps for the remainder of the current sample. The lock is // released and reacquired during the actual sleep period, so it's possible for // the transfer to be inactive when this method returns. func (m *Monitor) waitNextSample(now time.Duration) time.Duration { const minWait = 5 * time.Millisecond current := m.sLast // sleep until the last sample time changes (ideally, just one iteration) for m.sLast == current && m.active { d := current + m.sRate - now m.mu.Unlock() if d < minWait { d = minWait } time.Sleep(d) m.mu.Lock() now = m.update(0) } return now }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false