repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/compiler.go
vendor/github.com/goccy/go-json/internal/encoder/compiler.go
package encoder import ( "context" "encoding" "encoding/json" "reflect" "sync" "sync/atomic" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type marshalerContext interface { MarshalJSON(context.Context) ([]byte, error) } var ( marshalJSONType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() marshalJSONContextType = reflect.TypeOf((*marshalerContext)(nil)).Elem() marshalTextType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() jsonNumberType = reflect.TypeOf(json.Number("")) cachedOpcodeSets []*OpcodeSet cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet typeAddr *runtime.TypeAddr initEncoderOnce sync.Once ) func initEncoder() { initEncoderOnce.Do(func() { typeAddr = runtime.AnalyzeTypeAddr() if typeAddr == nil { typeAddr = &runtime.TypeAddr{} } cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) }) } func loadOpcodeMap() map[uintptr]*OpcodeSet { p := atomic.LoadPointer(&cachedOpcodeMap) return *(*map[uintptr]*OpcodeSet)(unsafe.Pointer(&p)) } func storeOpcodeSet(typ uintptr, set *OpcodeSet, m map[uintptr]*OpcodeSet) { newOpcodeMap := make(map[uintptr]*OpcodeSet, len(m)+1) newOpcodeMap[typ] = set for k, v := range m { newOpcodeMap[k] = v } atomic.StorePointer(&cachedOpcodeMap, *(*unsafe.Pointer)(unsafe.Pointer(&newOpcodeMap))) } func compileToGetCodeSetSlowPath(typeptr uintptr) (*OpcodeSet, error) { opcodeMap := loadOpcodeMap() if codeSet, exists := opcodeMap[typeptr]; exists { return codeSet, nil } codeSet, err := newCompiler().compile(typeptr) if err != nil { return nil, err } storeOpcodeSet(typeptr, codeSet, opcodeMap) return codeSet, nil } func getFilteredCodeSetIfNeeded(ctx *RuntimeContext, codeSet *OpcodeSet) (*OpcodeSet, error) { if (ctx.Option.Flag & ContextOption) == 0 { return codeSet, nil } query := FieldQueryFromContext(ctx.Option.Context) if query == nil { return codeSet, nil } ctx.Option.Flag |= FieldQueryOption cacheCodeSet := codeSet.getQueryCache(query.Hash()) 
if cacheCodeSet != nil { return cacheCodeSet, nil } queryCodeSet, err := newCompiler().codeToOpcodeSet(codeSet.Type, codeSet.Code.Filter(query)) if err != nil { return nil, err } codeSet.setQueryCache(query.Hash(), queryCodeSet) return queryCodeSet, nil } type Compiler struct { structTypeToCode map[uintptr]*StructCode } func newCompiler() *Compiler { return &Compiler{ structTypeToCode: map[uintptr]*StructCode{}, } } func (c *Compiler) compile(typeptr uintptr) (*OpcodeSet, error) { // noescape trick for header.typ ( reflect.*rtype ) typ := *(**runtime.Type)(unsafe.Pointer(&typeptr)) code, err := c.typeToCode(typ) if err != nil { return nil, err } return c.codeToOpcodeSet(typ, code) } func (c *Compiler) codeToOpcodeSet(typ *runtime.Type, code Code) (*OpcodeSet, error) { noescapeKeyCode := c.codeToOpcode(&compileContext{ structTypeToCodes: map[uintptr]Opcodes{}, recursiveCodes: &Opcodes{}, }, typ, code) if err := noescapeKeyCode.Validate(); err != nil { return nil, err } escapeKeyCode := c.codeToOpcode(&compileContext{ structTypeToCodes: map[uintptr]Opcodes{}, recursiveCodes: &Opcodes{}, escapeKey: true, }, typ, code) noescapeKeyCode = copyOpcode(noescapeKeyCode) escapeKeyCode = copyOpcode(escapeKeyCode) setTotalLengthToInterfaceOp(noescapeKeyCode) setTotalLengthToInterfaceOp(escapeKeyCode) interfaceNoescapeKeyCode := copyToInterfaceOpcode(noescapeKeyCode) interfaceEscapeKeyCode := copyToInterfaceOpcode(escapeKeyCode) codeLength := noescapeKeyCode.TotalLength() return &OpcodeSet{ Type: typ, NoescapeKeyCode: noescapeKeyCode, EscapeKeyCode: escapeKeyCode, InterfaceNoescapeKeyCode: interfaceNoescapeKeyCode, InterfaceEscapeKeyCode: interfaceEscapeKeyCode, CodeLength: codeLength, EndCode: ToEndCode(interfaceNoescapeKeyCode), Code: code, QueryCache: map[string]*OpcodeSet{}, }, nil } func (c *Compiler) typeToCode(typ *runtime.Type) (Code, error) { switch { case c.implementsMarshalJSON(typ): return c.marshalJSONCode(typ) case c.implementsMarshalText(typ): return 
c.marshalTextCode(typ) } isPtr := false orgType := typ if typ.Kind() == reflect.Ptr { typ = typ.Elem() isPtr = true } switch { case c.implementsMarshalJSON(typ): return c.marshalJSONCode(orgType) case c.implementsMarshalText(typ): return c.marshalTextCode(orgType) } switch typ.Kind() { case reflect.Slice: elem := typ.Elem() if elem.Kind() == reflect.Uint8 { p := runtime.PtrTo(elem) if !c.implementsMarshalJSONType(p) && !p.Implements(marshalTextType) { return c.bytesCode(typ, isPtr) } } return c.sliceCode(typ) case reflect.Map: if isPtr { return c.ptrCode(runtime.PtrTo(typ)) } return c.mapCode(typ) case reflect.Struct: return c.structCode(typ, isPtr) case reflect.Int: return c.intCode(typ, isPtr) case reflect.Int8: return c.int8Code(typ, isPtr) case reflect.Int16: return c.int16Code(typ, isPtr) case reflect.Int32: return c.int32Code(typ, isPtr) case reflect.Int64: return c.int64Code(typ, isPtr) case reflect.Uint, reflect.Uintptr: return c.uintCode(typ, isPtr) case reflect.Uint8: return c.uint8Code(typ, isPtr) case reflect.Uint16: return c.uint16Code(typ, isPtr) case reflect.Uint32: return c.uint32Code(typ, isPtr) case reflect.Uint64: return c.uint64Code(typ, isPtr) case reflect.Float32: return c.float32Code(typ, isPtr) case reflect.Float64: return c.float64Code(typ, isPtr) case reflect.String: return c.stringCode(typ, isPtr) case reflect.Bool: return c.boolCode(typ, isPtr) case reflect.Interface: return c.interfaceCode(typ, isPtr) default: if isPtr && typ.Implements(marshalTextType) { typ = orgType } return c.typeToCodeWithPtr(typ, isPtr) } } func (c *Compiler) typeToCodeWithPtr(typ *runtime.Type, isPtr bool) (Code, error) { switch { case c.implementsMarshalJSON(typ): return c.marshalJSONCode(typ) case c.implementsMarshalText(typ): return c.marshalTextCode(typ) } switch typ.Kind() { case reflect.Ptr: return c.ptrCode(typ) case reflect.Slice: elem := typ.Elem() if elem.Kind() == reflect.Uint8 { p := runtime.PtrTo(elem) if !c.implementsMarshalJSONType(p) && 
!p.Implements(marshalTextType) { return c.bytesCode(typ, false) } } return c.sliceCode(typ) case reflect.Array: return c.arrayCode(typ) case reflect.Map: return c.mapCode(typ) case reflect.Struct: return c.structCode(typ, isPtr) case reflect.Interface: return c.interfaceCode(typ, false) case reflect.Int: return c.intCode(typ, false) case reflect.Int8: return c.int8Code(typ, false) case reflect.Int16: return c.int16Code(typ, false) case reflect.Int32: return c.int32Code(typ, false) case reflect.Int64: return c.int64Code(typ, false) case reflect.Uint: return c.uintCode(typ, false) case reflect.Uint8: return c.uint8Code(typ, false) case reflect.Uint16: return c.uint16Code(typ, false) case reflect.Uint32: return c.uint32Code(typ, false) case reflect.Uint64: return c.uint64Code(typ, false) case reflect.Uintptr: return c.uintCode(typ, false) case reflect.Float32: return c.float32Code(typ, false) case reflect.Float64: return c.float64Code(typ, false) case reflect.String: return c.stringCode(typ, false) case reflect.Bool: return c.boolCode(typ, false) } return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} } const intSize = 32 << (^uint(0) >> 63) //nolint:unparam func (c *Compiler) intCode(typ *runtime.Type, isPtr bool) (*IntCode, error) { return &IntCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) int8Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) int16Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) int32Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) int64Code(typ *runtime.Type, isPtr bool) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) 
uintCode(typ *runtime.Type, isPtr bool) (*UintCode, error) { return &UintCode{typ: typ, bitSize: intSize, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) uint8Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 8, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) uint16Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 16, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) uint32Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) uint64Code(typ *runtime.Type, isPtr bool) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) float32Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { return &FloatCode{typ: typ, bitSize: 32, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) float64Code(typ *runtime.Type, isPtr bool) (*FloatCode, error) { return &FloatCode{typ: typ, bitSize: 64, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) stringCode(typ *runtime.Type, isPtr bool) (*StringCode, error) { return &StringCode{typ: typ, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) boolCode(typ *runtime.Type, isPtr bool) (*BoolCode, error) { return &BoolCode{typ: typ, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) intStringCode(typ *runtime.Type) (*IntCode, error) { return &IntCode{typ: typ, bitSize: intSize, isString: true}, nil } //nolint:unparam func (c *Compiler) int8StringCode(typ *runtime.Type) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 8, isString: true}, nil } //nolint:unparam func (c *Compiler) int16StringCode(typ *runtime.Type) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 16, isString: true}, nil } //nolint:unparam func (c *Compiler) int32StringCode(typ *runtime.Type) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 32, isString: true}, nil } 
//nolint:unparam func (c *Compiler) int64StringCode(typ *runtime.Type) (*IntCode, error) { return &IntCode{typ: typ, bitSize: 64, isString: true}, nil } //nolint:unparam func (c *Compiler) uintStringCode(typ *runtime.Type) (*UintCode, error) { return &UintCode{typ: typ, bitSize: intSize, isString: true}, nil } //nolint:unparam func (c *Compiler) uint8StringCode(typ *runtime.Type) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 8, isString: true}, nil } //nolint:unparam func (c *Compiler) uint16StringCode(typ *runtime.Type) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 16, isString: true}, nil } //nolint:unparam func (c *Compiler) uint32StringCode(typ *runtime.Type) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 32, isString: true}, nil } //nolint:unparam func (c *Compiler) uint64StringCode(typ *runtime.Type) (*UintCode, error) { return &UintCode{typ: typ, bitSize: 64, isString: true}, nil } //nolint:unparam func (c *Compiler) bytesCode(typ *runtime.Type, isPtr bool) (*BytesCode, error) { return &BytesCode{typ: typ, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) interfaceCode(typ *runtime.Type, isPtr bool) (*InterfaceCode, error) { return &InterfaceCode{typ: typ, isPtr: isPtr}, nil } //nolint:unparam func (c *Compiler) marshalJSONCode(typ *runtime.Type) (*MarshalJSONCode, error) { return &MarshalJSONCode{ typ: typ, isAddrForMarshaler: c.isPtrMarshalJSONType(typ), isNilableType: c.isNilableType(typ), isMarshalerContext: typ.Implements(marshalJSONContextType) || runtime.PtrTo(typ).Implements(marshalJSONContextType), }, nil } //nolint:unparam func (c *Compiler) marshalTextCode(typ *runtime.Type) (*MarshalTextCode, error) { return &MarshalTextCode{ typ: typ, isAddrForMarshaler: c.isPtrMarshalTextType(typ), isNilableType: c.isNilableType(typ), }, nil } func (c *Compiler) ptrCode(typ *runtime.Type) (*PtrCode, error) { code, err := c.typeToCodeWithPtr(typ.Elem(), true) if err != nil { return nil, err } ptr, ok := code.(*PtrCode) if 
ok { return &PtrCode{typ: typ, value: ptr.value, ptrNum: ptr.ptrNum + 1}, nil } return &PtrCode{typ: typ, value: code, ptrNum: 1}, nil } func (c *Compiler) sliceCode(typ *runtime.Type) (*SliceCode, error) { elem := typ.Elem() code, err := c.listElemCode(elem) if err != nil { return nil, err } if code.Kind() == CodeKindStruct { structCode := code.(*StructCode) structCode.enableIndirect() } return &SliceCode{typ: typ, value: code}, nil } func (c *Compiler) arrayCode(typ *runtime.Type) (*ArrayCode, error) { elem := typ.Elem() code, err := c.listElemCode(elem) if err != nil { return nil, err } if code.Kind() == CodeKindStruct { structCode := code.(*StructCode) structCode.enableIndirect() } return &ArrayCode{typ: typ, value: code}, nil } func (c *Compiler) mapCode(typ *runtime.Type) (*MapCode, error) { keyCode, err := c.mapKeyCode(typ.Key()) if err != nil { return nil, err } valueCode, err := c.mapValueCode(typ.Elem()) if err != nil { return nil, err } if valueCode.Kind() == CodeKindStruct { structCode := valueCode.(*StructCode) structCode.enableIndirect() } return &MapCode{typ: typ, key: keyCode, value: valueCode}, nil } func (c *Compiler) listElemCode(typ *runtime.Type) (Code, error) { switch { case c.implementsMarshalJSONType(typ) || c.implementsMarshalJSONType(runtime.PtrTo(typ)): return c.marshalJSONCode(typ) case !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType): return c.marshalTextCode(typ) case typ.Kind() == reflect.Map: return c.ptrCode(runtime.PtrTo(typ)) default: // isPtr was originally used to indicate whether the type of top level is pointer. // However, since the slice/array element is a specification that can get the pointer address, explicitly set isPtr to true. 
// See here for related issues: https://github.com/goccy/go-json/issues/370 code, err := c.typeToCodeWithPtr(typ, true) if err != nil { return nil, err } ptr, ok := code.(*PtrCode) if ok { if ptr.value.Kind() == CodeKindMap { ptr.ptrNum++ } } return code, nil } } func (c *Compiler) mapKeyCode(typ *runtime.Type) (Code, error) { switch { case c.implementsMarshalText(typ): return c.marshalTextCode(typ) } switch typ.Kind() { case reflect.Ptr: return c.ptrCode(typ) case reflect.String: return c.stringCode(typ, false) case reflect.Int: return c.intStringCode(typ) case reflect.Int8: return c.int8StringCode(typ) case reflect.Int16: return c.int16StringCode(typ) case reflect.Int32: return c.int32StringCode(typ) case reflect.Int64: return c.int64StringCode(typ) case reflect.Uint: return c.uintStringCode(typ) case reflect.Uint8: return c.uint8StringCode(typ) case reflect.Uint16: return c.uint16StringCode(typ) case reflect.Uint32: return c.uint32StringCode(typ) case reflect.Uint64: return c.uint64StringCode(typ) case reflect.Uintptr: return c.uintStringCode(typ) } return nil, &errors.UnsupportedTypeError{Type: runtime.RType2Type(typ)} } func (c *Compiler) mapValueCode(typ *runtime.Type) (Code, error) { switch typ.Kind() { case reflect.Map: return c.ptrCode(runtime.PtrTo(typ)) default: code, err := c.typeToCodeWithPtr(typ, false) if err != nil { return nil, err } ptr, ok := code.(*PtrCode) if ok { if ptr.value.Kind() == CodeKindMap { ptr.ptrNum++ } } return code, nil } } func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error) { typeptr := uintptr(unsafe.Pointer(typ)) if code, exists := c.structTypeToCode[typeptr]; exists { derefCode := *code derefCode.isRecursive = true return &derefCode, nil } indirect := runtime.IfaceIndir(typ) code := &StructCode{typ: typ, isPtr: isPtr, isIndirect: indirect} c.structTypeToCode[typeptr] = code fieldNum := typ.NumField() tags := c.typeToStructTags(typ) fields := []*StructFieldCode{} for i, tag := range tags { 
isOnlyOneFirstField := i == 0 && fieldNum == 1 field, err := c.structFieldCode(code, tag, isPtr, isOnlyOneFirstField) if err != nil { return nil, err } if field.isAnonymous { structCode := field.getAnonymousStruct() if structCode != nil { structCode.removeFieldsByTags(tags) if c.isAssignableIndirect(field, isPtr) { if indirect { structCode.isIndirect = true } else { structCode.isIndirect = false } } } } else { structCode := field.getStruct() if structCode != nil { if indirect { // if parent is indirect type, set child indirect property to true structCode.isIndirect = true } else { // if parent is not indirect type, set child indirect property to false. // but if parent's indirect is false and isPtr is true, then indirect must be true. // Do this only if indirectConversion is enabled at the end of compileStruct. structCode.isIndirect = false } } } fields = append(fields, field) } fieldMap := c.getFieldMap(fields) duplicatedFieldMap := c.getDuplicatedFieldMap(fieldMap) code.fields = c.filteredDuplicatedFields(fields, duplicatedFieldMap) if !code.disableIndirectConversion && !indirect && isPtr { code.enableIndirect() } delete(c.structTypeToCode, typeptr) return code, nil } func toElemType(t *runtime.Type) *runtime.Type { for t.Kind() == reflect.Ptr { t = t.Elem() } return t } func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { field := tag.Field fieldType := runtime.Type2RType(field.Type) isIndirectSpecialCase := isPtr && isOnlyOneFirstField fieldCode := &StructFieldCode{ typ: fieldType, key: tag.Key, tag: tag, offset: field.Offset, isAnonymous: field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct, isTaggedKey: tag.IsTaggedKey, isNilableType: c.isNilableType(fieldType), isNilCheck: true, } switch { case c.isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(fieldType, isIndirectSpecialCase): code, err := c.marshalJSONCode(fieldType) if err 
!= nil { return nil, err } fieldCode.value = code fieldCode.isAddrForMarshaler = true fieldCode.isNilCheck = false structCode.isIndirect = false structCode.disableIndirectConversion = true case c.isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(fieldType, isIndirectSpecialCase): code, err := c.marshalTextCode(fieldType) if err != nil { return nil, err } fieldCode.value = code fieldCode.isAddrForMarshaler = true fieldCode.isNilCheck = false structCode.isIndirect = false structCode.disableIndirectConversion = true case isPtr && c.isPtrMarshalJSONType(fieldType): // *struct{ field T } // func (*T) MarshalJSON() ([]byte, error) code, err := c.marshalJSONCode(fieldType) if err != nil { return nil, err } fieldCode.value = code fieldCode.isAddrForMarshaler = true fieldCode.isNilCheck = false case isPtr && c.isPtrMarshalTextType(fieldType): // *struct{ field T } // func (*T) MarshalText() ([]byte, error) code, err := c.marshalTextCode(fieldType) if err != nil { return nil, err } fieldCode.value = code fieldCode.isAddrForMarshaler = true fieldCode.isNilCheck = false default: code, err := c.typeToCodeWithPtr(fieldType, isPtr) if err != nil { return nil, err } switch code.Kind() { case CodeKindPtr, CodeKindInterface: fieldCode.isNextOpPtrType = true } fieldCode.value = code } return fieldCode, nil } func (c *Compiler) isAssignableIndirect(fieldCode *StructFieldCode, isPtr bool) bool { if isPtr { return false } codeType := fieldCode.value.Kind() if codeType == CodeKindMarshalJSON { return false } if codeType == CodeKindMarshalText { return false } return true } func (c *Compiler) getFieldMap(fields []*StructFieldCode) map[string][]*StructFieldCode { fieldMap := map[string][]*StructFieldCode{} for _, field := range fields { if field.isAnonymous { for k, v := range c.getAnonymousFieldMap(field) { fieldMap[k] = append(fieldMap[k], v...) 
} continue } fieldMap[field.key] = append(fieldMap[field.key], field) } return fieldMap } func (c *Compiler) getAnonymousFieldMap(field *StructFieldCode) map[string][]*StructFieldCode { fieldMap := map[string][]*StructFieldCode{} structCode := field.getAnonymousStruct() if structCode == nil || structCode.isRecursive { fieldMap[field.key] = append(fieldMap[field.key], field) return fieldMap } for k, v := range c.getFieldMapFromAnonymousParent(structCode.fields) { fieldMap[k] = append(fieldMap[k], v...) } return fieldMap } func (c *Compiler) getFieldMapFromAnonymousParent(fields []*StructFieldCode) map[string][]*StructFieldCode { fieldMap := map[string][]*StructFieldCode{} for _, field := range fields { if field.isAnonymous { for k, v := range c.getAnonymousFieldMap(field) { // Do not handle tagged key when embedding more than once for _, vv := range v { vv.isTaggedKey = false } fieldMap[k] = append(fieldMap[k], v...) } continue } fieldMap[field.key] = append(fieldMap[field.key], field) } return fieldMap } func (c *Compiler) getDuplicatedFieldMap(fieldMap map[string][]*StructFieldCode) map[*StructFieldCode]struct{} { duplicatedFieldMap := map[*StructFieldCode]struct{}{} for _, fields := range fieldMap { if len(fields) == 1 { continue } if c.isTaggedKeyOnly(fields) { for _, field := range fields { if field.isTaggedKey { continue } duplicatedFieldMap[field] = struct{}{} } } else { for _, field := range fields { duplicatedFieldMap[field] = struct{}{} } } } return duplicatedFieldMap } func (c *Compiler) filteredDuplicatedFields(fields []*StructFieldCode, duplicatedFieldMap map[*StructFieldCode]struct{}) []*StructFieldCode { filteredFields := make([]*StructFieldCode, 0, len(fields)) for _, field := range fields { if field.isAnonymous { structCode := field.getAnonymousStruct() if structCode != nil && !structCode.isRecursive { structCode.fields = c.filteredDuplicatedFields(structCode.fields, duplicatedFieldMap) if len(structCode.fields) > 0 { filteredFields = 
append(filteredFields, field) } continue } } if _, exists := duplicatedFieldMap[field]; exists { continue } filteredFields = append(filteredFields, field) } return filteredFields } func (c *Compiler) isTaggedKeyOnly(fields []*StructFieldCode) bool { var taggedKeyFieldCount int for _, field := range fields { if field.isTaggedKey { taggedKeyFieldCount++ } } return taggedKeyFieldCount == 1 } func (c *Compiler) typeToStructTags(typ *runtime.Type) runtime.StructTags { tags := runtime.StructTags{} fieldNum := typ.NumField() for i := 0; i < fieldNum; i++ { field := typ.Field(i) if runtime.IsIgnoredStructField(field) { continue } tags = append(tags, runtime.StructTagFromField(field)) } return tags } // *struct{ field T } => struct { field *T } // func (*T) MarshalJSON() ([]byte, error) func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalJSONFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalJSONType(typ) } // *struct{ field T } => struct { field *T } // func (*T) MarshalText() ([]byte, error) func (c *Compiler) isMovePointerPositionFromHeadToFirstMarshalTextFieldCase(typ *runtime.Type, isIndirectSpecialCase bool) bool { return isIndirectSpecialCase && !c.isNilableType(typ) && c.isPtrMarshalTextType(typ) } func (c *Compiler) implementsMarshalJSON(typ *runtime.Type) bool { if !c.implementsMarshalJSONType(typ) { return false } if typ.Kind() != reflect.Ptr { return true } // type kind is reflect.Ptr if !c.implementsMarshalJSONType(typ.Elem()) { return true } // needs to dereference return false } func (c *Compiler) implementsMarshalText(typ *runtime.Type) bool { if !typ.Implements(marshalTextType) { return false } if typ.Kind() != reflect.Ptr { return true } // type kind is reflect.Ptr if !typ.Elem().Implements(marshalTextType) { return true } // needs to dereference return false } func (c *Compiler) isNilableType(typ *runtime.Type) bool { if !runtime.IfaceIndir(typ) { return true } 
switch typ.Kind() { case reflect.Ptr: return true case reflect.Map: return true case reflect.Func: return true default: return false } } func (c *Compiler) implementsMarshalJSONType(typ *runtime.Type) bool { return typ.Implements(marshalJSONType) || typ.Implements(marshalJSONContextType) } func (c *Compiler) isPtrMarshalJSONType(typ *runtime.Type) bool { return !c.implementsMarshalJSONType(typ) && c.implementsMarshalJSONType(runtime.PtrTo(typ)) } func (c *Compiler) isPtrMarshalTextType(typ *runtime.Type) bool { return !typ.Implements(marshalTextType) && runtime.PtrTo(typ).Implements(marshalTextType) } func (c *Compiler) codeToOpcode(ctx *compileContext, typ *runtime.Type, code Code) *Opcode { codes := code.ToOpcode(ctx) codes.Last().Next = newEndOp(ctx, typ) c.linkRecursiveCode(ctx) return codes.First() } func (c *Compiler) linkRecursiveCode(ctx *compileContext) { recursiveCodes := map[uintptr]*CompiledCode{} for _, recursive := range *ctx.recursiveCodes { typeptr := uintptr(unsafe.Pointer(recursive.Type)) codes := ctx.structTypeToCodes[typeptr] if recursiveCode, ok := recursiveCodes[typeptr]; ok { *recursive.Jmp = *recursiveCode continue } code := copyOpcode(codes.First()) code.Op = code.Op.PtrHeadToHead() lastCode := newEndOp(&compileContext{}, recursive.Type) lastCode.Op = OpRecursiveEnd // OpRecursiveEnd must set before call TotalLength code.End.Next = lastCode totalLength := code.TotalLength() // Idx, ElemIdx, Length must set after call TotalLength lastCode.Idx = uint32((totalLength + 1) * uintptrSize) lastCode.ElemIdx = lastCode.Idx + uintptrSize lastCode.Length = lastCode.Idx + 2*uintptrSize // extend length to alloc slot for elemIdx + length curTotalLength := uintptr(recursive.TotalLength()) + 3 nextTotalLength := uintptr(totalLength) + 3 compiled := recursive.Jmp compiled.Code = code compiled.CurLen = curTotalLength compiled.NextLen = nextTotalLength compiled.Linked = true recursiveCodes[typeptr] = compiled } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/opcode.go
vendor/github.com/goccy/go-json/internal/encoder/opcode.go
package encoder import ( "bytes" "fmt" "sort" "strings" "unsafe" "github.com/goccy/go-json/internal/runtime" ) const uintptrSize = 4 << (^uintptr(0) >> 63) type OpFlags uint16 const ( AnonymousHeadFlags OpFlags = 1 << 0 AnonymousKeyFlags OpFlags = 1 << 1 IndirectFlags OpFlags = 1 << 2 IsTaggedKeyFlags OpFlags = 1 << 3 NilCheckFlags OpFlags = 1 << 4 AddrForMarshalerFlags OpFlags = 1 << 5 IsNextOpPtrTypeFlags OpFlags = 1 << 6 IsNilableTypeFlags OpFlags = 1 << 7 MarshalerContextFlags OpFlags = 1 << 8 NonEmptyInterfaceFlags OpFlags = 1 << 9 ) type Opcode struct { Op OpType // operation type Idx uint32 // offset to access ptr Next *Opcode // next opcode End *Opcode // array/slice/struct/map end NextField *Opcode // next struct field Key string // struct field key Offset uint32 // offset size from struct header PtrNum uint8 // pointer number: e.g. double pointer is 2. NumBitSize uint8 Flags OpFlags Type *runtime.Type // go type Jmp *CompiledCode // for recursive call FieldQuery *FieldQuery // field query for Interface / MarshalJSON / MarshalText ElemIdx uint32 // offset to access array/slice elem Length uint32 // offset to access slice length or array length Indent uint32 // indent number Size uint32 // array/slice elem size DisplayIdx uint32 // opcode index DisplayKey string // key text to display } func (c *Opcode) Validate() error { var prevIdx uint32 for code := c; !code.IsEnd(); { if prevIdx != 0 { if code.DisplayIdx != prevIdx+1 { return fmt.Errorf( "invalid index. previous display index is %d but next is %d. 
dump = %s", prevIdx, code.DisplayIdx, c.Dump(), ) } } prevIdx = code.DisplayIdx code = code.IterNext() } return nil } func (c *Opcode) IterNext() *Opcode { if c == nil { return nil } switch c.Op.CodeType() { case CodeArrayElem, CodeSliceElem, CodeMapKey: return c.End default: return c.Next } } func (c *Opcode) IsEnd() bool { if c == nil { return true } return c.Op == OpEnd || c.Op == OpInterfaceEnd || c.Op == OpRecursiveEnd } func (c *Opcode) MaxIdx() uint32 { max := uint32(0) for _, value := range []uint32{ c.Idx, c.ElemIdx, c.Length, c.Size, } { if max < value { max = value } } return max } func (c *Opcode) ToHeaderType(isString bool) OpType { switch c.Op { case OpInt: if isString { return OpStructHeadIntString } return OpStructHeadInt case OpIntPtr: if isString { return OpStructHeadIntPtrString } return OpStructHeadIntPtr case OpUint: if isString { return OpStructHeadUintString } return OpStructHeadUint case OpUintPtr: if isString { return OpStructHeadUintPtrString } return OpStructHeadUintPtr case OpFloat32: if isString { return OpStructHeadFloat32String } return OpStructHeadFloat32 case OpFloat32Ptr: if isString { return OpStructHeadFloat32PtrString } return OpStructHeadFloat32Ptr case OpFloat64: if isString { return OpStructHeadFloat64String } return OpStructHeadFloat64 case OpFloat64Ptr: if isString { return OpStructHeadFloat64PtrString } return OpStructHeadFloat64Ptr case OpString: if isString { return OpStructHeadStringString } return OpStructHeadString case OpStringPtr: if isString { return OpStructHeadStringPtrString } return OpStructHeadStringPtr case OpNumber: if isString { return OpStructHeadNumberString } return OpStructHeadNumber case OpNumberPtr: if isString { return OpStructHeadNumberPtrString } return OpStructHeadNumberPtr case OpBool: if isString { return OpStructHeadBoolString } return OpStructHeadBool case OpBoolPtr: if isString { return OpStructHeadBoolPtrString } return OpStructHeadBoolPtr case OpBytes: return OpStructHeadBytes case 
OpBytesPtr: return OpStructHeadBytesPtr case OpMap: return OpStructHeadMap case OpMapPtr: c.Op = OpMap return OpStructHeadMapPtr case OpArray: return OpStructHeadArray case OpArrayPtr: c.Op = OpArray return OpStructHeadArrayPtr case OpSlice: return OpStructHeadSlice case OpSlicePtr: c.Op = OpSlice return OpStructHeadSlicePtr case OpMarshalJSON: return OpStructHeadMarshalJSON case OpMarshalJSONPtr: return OpStructHeadMarshalJSONPtr case OpMarshalText: return OpStructHeadMarshalText case OpMarshalTextPtr: return OpStructHeadMarshalTextPtr } return OpStructHead } func (c *Opcode) ToFieldType(isString bool) OpType { switch c.Op { case OpInt: if isString { return OpStructFieldIntString } return OpStructFieldInt case OpIntPtr: if isString { return OpStructFieldIntPtrString } return OpStructFieldIntPtr case OpUint: if isString { return OpStructFieldUintString } return OpStructFieldUint case OpUintPtr: if isString { return OpStructFieldUintPtrString } return OpStructFieldUintPtr case OpFloat32: if isString { return OpStructFieldFloat32String } return OpStructFieldFloat32 case OpFloat32Ptr: if isString { return OpStructFieldFloat32PtrString } return OpStructFieldFloat32Ptr case OpFloat64: if isString { return OpStructFieldFloat64String } return OpStructFieldFloat64 case OpFloat64Ptr: if isString { return OpStructFieldFloat64PtrString } return OpStructFieldFloat64Ptr case OpString: if isString { return OpStructFieldStringString } return OpStructFieldString case OpStringPtr: if isString { return OpStructFieldStringPtrString } return OpStructFieldStringPtr case OpNumber: if isString { return OpStructFieldNumberString } return OpStructFieldNumber case OpNumberPtr: if isString { return OpStructFieldNumberPtrString } return OpStructFieldNumberPtr case OpBool: if isString { return OpStructFieldBoolString } return OpStructFieldBool case OpBoolPtr: if isString { return OpStructFieldBoolPtrString } return OpStructFieldBoolPtr case OpBytes: return OpStructFieldBytes case OpBytesPtr: 
return OpStructFieldBytesPtr case OpMap: return OpStructFieldMap case OpMapPtr: c.Op = OpMap return OpStructFieldMapPtr case OpArray: return OpStructFieldArray case OpArrayPtr: c.Op = OpArray return OpStructFieldArrayPtr case OpSlice: return OpStructFieldSlice case OpSlicePtr: c.Op = OpSlice return OpStructFieldSlicePtr case OpMarshalJSON: return OpStructFieldMarshalJSON case OpMarshalJSONPtr: return OpStructFieldMarshalJSONPtr case OpMarshalText: return OpStructFieldMarshalText case OpMarshalTextPtr: return OpStructFieldMarshalTextPtr } return OpStructField } func newOpCode(ctx *compileContext, typ *runtime.Type, op OpType) *Opcode { return newOpCodeWithNext(ctx, typ, op, newEndOp(ctx, typ)) } func opcodeOffset(idx int) uint32 { return uint32(idx) * uintptrSize } func getCodeAddrByIdx(head *Opcode, idx uint32) *Opcode { addr := uintptr(unsafe.Pointer(head)) + uintptr(idx)*unsafe.Sizeof(Opcode{}) return *(**Opcode)(unsafe.Pointer(&addr)) } func copyOpcode(code *Opcode) *Opcode { codeNum := ToEndCode(code).DisplayIdx + 1 codeSlice := make([]Opcode, codeNum) head := (*Opcode)((*runtime.SliceHeader)(unsafe.Pointer(&codeSlice)).Data) ptr := head c := code for { *ptr = Opcode{ Op: c.Op, Key: c.Key, PtrNum: c.PtrNum, NumBitSize: c.NumBitSize, Flags: c.Flags, Idx: c.Idx, Offset: c.Offset, Type: c.Type, FieldQuery: c.FieldQuery, DisplayIdx: c.DisplayIdx, DisplayKey: c.DisplayKey, ElemIdx: c.ElemIdx, Length: c.Length, Size: c.Size, Indent: c.Indent, Jmp: c.Jmp, } if c.End != nil { ptr.End = getCodeAddrByIdx(head, c.End.DisplayIdx) } if c.NextField != nil { ptr.NextField = getCodeAddrByIdx(head, c.NextField.DisplayIdx) } if c.Next != nil { ptr.Next = getCodeAddrByIdx(head, c.Next.DisplayIdx) } if c.IsEnd() { break } ptr = getCodeAddrByIdx(head, c.DisplayIdx+1) c = c.IterNext() } return head } func setTotalLengthToInterfaceOp(code *Opcode) { for c := code; !c.IsEnd(); { if c.Op == OpInterface || c.Op == OpInterfacePtr { c.Length = uint32(code.TotalLength()) } c = c.IterNext() 
} } func ToEndCode(code *Opcode) *Opcode { c := code for !c.IsEnd() { c = c.IterNext() } return c } func copyToInterfaceOpcode(code *Opcode) *Opcode { copied := copyOpcode(code) c := copied c = ToEndCode(c) c.Idx += uintptrSize c.ElemIdx = c.Idx + uintptrSize c.Length = c.Idx + 2*uintptrSize c.Op = OpInterfaceEnd return copied } func newOpCodeWithNext(ctx *compileContext, typ *runtime.Type, op OpType, next *Opcode) *Opcode { return &Opcode{ Op: op, Idx: opcodeOffset(ctx.ptrIndex), Next: next, Type: typ, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } } func newEndOp(ctx *compileContext, typ *runtime.Type) *Opcode { return newOpCodeWithNext(ctx, typ, OpEnd, nil) } func (c *Opcode) TotalLength() int { var idx int code := c for !code.IsEnd() { maxIdx := int(code.MaxIdx() / uintptrSize) if idx < maxIdx { idx = maxIdx } if code.Op == OpRecursiveEnd { break } code = code.IterNext() } maxIdx := int(code.MaxIdx() / uintptrSize) if idx < maxIdx { idx = maxIdx } return idx + 1 } func (c *Opcode) dumpHead(code *Opcode) string { var length uint32 if code.Op.CodeType() == CodeArrayHead { length = code.Length } else { length = code.Length / uintptrSize } return fmt.Sprintf( `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, code.ElemIdx/uintptrSize, length, ) } func (c *Opcode) dumpMapHead(code *Opcode) string { return fmt.Sprintf( `[%03d]%s%s ([idx:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, ) } func (c *Opcode) dumpMapEnd(code *Opcode) string { return fmt.Sprintf( `[%03d]%s%s ([idx:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, ) } func (c *Opcode) dumpElem(code *Opcode) string { var length uint32 if code.Op.CodeType() == CodeArrayElem { length = code.Length } else { length = code.Length / uintptrSize } return fmt.Sprintf( `[%03d]%s%s ([idx:%d][elemIdx:%d][length:%d][size:%d])`, code.DisplayIdx, 
strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, code.ElemIdx/uintptrSize, length, code.Size, ) } func (c *Opcode) dumpField(code *Opcode) string { return fmt.Sprintf( `[%03d]%s%s ([idx:%d][key:%s][offset:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, code.DisplayKey, code.Offset, ) } func (c *Opcode) dumpKey(code *Opcode) string { return fmt.Sprintf( `[%03d]%s%s ([idx:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, ) } func (c *Opcode) dumpValue(code *Opcode) string { return fmt.Sprintf( `[%03d]%s%s ([idx:%d])`, code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, ) } func (c *Opcode) Dump() string { codes := []string{} for code := c; !code.IsEnd(); { switch code.Op.CodeType() { case CodeSliceHead: codes = append(codes, c.dumpHead(code)) code = code.Next case CodeMapHead: codes = append(codes, c.dumpMapHead(code)) code = code.Next case CodeArrayElem, CodeSliceElem: codes = append(codes, c.dumpElem(code)) code = code.End case CodeMapKey: codes = append(codes, c.dumpKey(code)) code = code.End case CodeMapValue: codes = append(codes, c.dumpValue(code)) code = code.Next case CodeMapEnd: codes = append(codes, c.dumpMapEnd(code)) code = code.Next case CodeStructField: codes = append(codes, c.dumpField(code)) code = code.Next case CodeStructEnd: codes = append(codes, c.dumpField(code)) code = code.Next default: codes = append(codes, fmt.Sprintf( "[%03d]%s%s ([idx:%d])", code.DisplayIdx, strings.Repeat("-", int(code.Indent)), code.Op, code.Idx/uintptrSize, )) code = code.Next } } return strings.Join(codes, "\n") } func (c *Opcode) DumpDOT() string { type edge struct { from, to *Opcode label string weight int } var edges []edge b := &bytes.Buffer{} fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type) fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];") for code := c; !code.IsEnd(); { label := code.Op.String() 
fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label) if p := code.Next; p != nil { edges = append(edges, edge{ from: code, to: p, label: "Next", weight: 10, }) } if p := code.NextField; p != nil { edges = append(edges, edge{ from: code, to: p, label: "NextField", weight: 2, }) } if p := code.End; p != nil { edges = append(edges, edge{ from: code, to: p, label: "End", weight: 1, }) } if p := code.Jmp; p != nil { edges = append(edges, edge{ from: code, to: p.Code, label: "Jmp", weight: 1, }) } switch code.Op.CodeType() { case CodeSliceHead: code = code.Next case CodeMapHead: code = code.Next case CodeArrayElem, CodeSliceElem: code = code.End case CodeMapKey: code = code.End case CodeMapValue: code = code.Next case CodeMapEnd: code = code.Next case CodeStructField: code = code.Next case CodeStructEnd: code = code.Next default: code = code.Next } if code.IsEnd() { fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String()) } } sort.Slice(edges, func(i, j int) bool { return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx }) for _, e := range edges { fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight) } fmt.Fprint(b, "}") return b.String() } func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { idx := opcodeOffset(ctx.ptrIndex) ctx.incPtrIndex() elemIdx := opcodeOffset(ctx.ptrIndex) ctx.incPtrIndex() length := opcodeOffset(ctx.ptrIndex) return &Opcode{ Op: OpSlice, Type: typ, Idx: idx, DisplayIdx: ctx.opcodeIndex, ElemIdx: elemIdx, Length: length, Indent: ctx.indent, } } func newSliceElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, size uintptr) *Opcode { return &Opcode{ Op: OpSliceElem, Type: typ, Idx: head.Idx, DisplayIdx: ctx.opcodeIndex, ElemIdx: head.ElemIdx, Length: head.Length, Indent: ctx.indent, Size: uint32(size), } } func newArrayHeaderCode(ctx *compileContext, typ *runtime.Type, alen int) *Opcode { idx := opcodeOffset(ctx.ptrIndex) ctx.incPtrIndex() elemIdx := 
opcodeOffset(ctx.ptrIndex) return &Opcode{ Op: OpArray, Type: typ, Idx: idx, DisplayIdx: ctx.opcodeIndex, ElemIdx: elemIdx, Indent: ctx.indent, Length: uint32(alen), } } func newArrayElemCode(ctx *compileContext, typ *runtime.Type, head *Opcode, length int, size uintptr) *Opcode { return &Opcode{ Op: OpArrayElem, Type: typ, Idx: head.Idx, DisplayIdx: ctx.opcodeIndex, ElemIdx: head.ElemIdx, Length: uint32(length), Indent: ctx.indent, Size: uint32(size), } } func newMapHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { idx := opcodeOffset(ctx.ptrIndex) ctx.incPtrIndex() return &Opcode{ Op: OpMap, Type: typ, Idx: idx, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } } func newMapKeyCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { return &Opcode{ Op: OpMapKey, Type: typ, Idx: head.Idx, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } } func newMapValueCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { return &Opcode{ Op: OpMapValue, Type: typ, Idx: head.Idx, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } } func newMapEndCode(ctx *compileContext, typ *runtime.Type, head *Opcode) *Opcode { return &Opcode{ Op: OpMapEnd, Type: typ, Idx: head.Idx, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, Next: newEndOp(ctx, typ), } } func newRecursiveCode(ctx *compileContext, typ *runtime.Type, jmp *CompiledCode) *Opcode { return &Opcode{ Op: OpRecursive, Type: typ, Idx: opcodeOffset(ctx.ptrIndex), Next: newEndOp(ctx, typ), DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, Jmp: jmp, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/code.go
vendor/github.com/goccy/go-json/internal/encoder/code.go
package encoder import ( "fmt" "reflect" "unsafe" "github.com/goccy/go-json/internal/runtime" ) type Code interface { Kind() CodeKind ToOpcode(*compileContext) Opcodes Filter(*FieldQuery) Code } type AnonymousCode interface { ToAnonymousOpcode(*compileContext) Opcodes } type Opcodes []*Opcode func (o Opcodes) First() *Opcode { if len(o) == 0 { return nil } return o[0] } func (o Opcodes) Last() *Opcode { if len(o) == 0 { return nil } return o[len(o)-1] } func (o Opcodes) Add(codes ...*Opcode) Opcodes { return append(o, codes...) } type CodeKind int const ( CodeKindInterface CodeKind = iota CodeKindPtr CodeKindInt CodeKindUint CodeKindFloat CodeKindString CodeKindBool CodeKindStruct CodeKindMap CodeKindSlice CodeKindArray CodeKindBytes CodeKindMarshalJSON CodeKindMarshalText CodeKindRecursive ) type IntCode struct { typ *runtime.Type bitSize uint8 isString bool isPtr bool } func (c *IntCode) Kind() CodeKind { return CodeKindInt } func (c *IntCode) ToOpcode(ctx *compileContext) Opcodes { var code *Opcode switch { case c.isPtr: code = newOpCode(ctx, c.typ, OpIntPtr) case c.isString: code = newOpCode(ctx, c.typ, OpIntString) default: code = newOpCode(ctx, c.typ, OpInt) } code.NumBitSize = c.bitSize ctx.incIndex() return Opcodes{code} } func (c *IntCode) Filter(_ *FieldQuery) Code { return c } type UintCode struct { typ *runtime.Type bitSize uint8 isString bool isPtr bool } func (c *UintCode) Kind() CodeKind { return CodeKindUint } func (c *UintCode) ToOpcode(ctx *compileContext) Opcodes { var code *Opcode switch { case c.isPtr: code = newOpCode(ctx, c.typ, OpUintPtr) case c.isString: code = newOpCode(ctx, c.typ, OpUintString) default: code = newOpCode(ctx, c.typ, OpUint) } code.NumBitSize = c.bitSize ctx.incIndex() return Opcodes{code} } func (c *UintCode) Filter(_ *FieldQuery) Code { return c } type FloatCode struct { typ *runtime.Type bitSize uint8 isPtr bool } func (c *FloatCode) Kind() CodeKind { return CodeKindFloat } func (c *FloatCode) ToOpcode(ctx 
*compileContext) Opcodes { var code *Opcode switch { case c.isPtr: switch c.bitSize { case 32: code = newOpCode(ctx, c.typ, OpFloat32Ptr) default: code = newOpCode(ctx, c.typ, OpFloat64Ptr) } default: switch c.bitSize { case 32: code = newOpCode(ctx, c.typ, OpFloat32) default: code = newOpCode(ctx, c.typ, OpFloat64) } } ctx.incIndex() return Opcodes{code} } func (c *FloatCode) Filter(_ *FieldQuery) Code { return c } type StringCode struct { typ *runtime.Type isPtr bool } func (c *StringCode) Kind() CodeKind { return CodeKindString } func (c *StringCode) ToOpcode(ctx *compileContext) Opcodes { isJSONNumberType := c.typ == runtime.Type2RType(jsonNumberType) var code *Opcode if c.isPtr { if isJSONNumberType { code = newOpCode(ctx, c.typ, OpNumberPtr) } else { code = newOpCode(ctx, c.typ, OpStringPtr) } } else { if isJSONNumberType { code = newOpCode(ctx, c.typ, OpNumber) } else { code = newOpCode(ctx, c.typ, OpString) } } ctx.incIndex() return Opcodes{code} } func (c *StringCode) Filter(_ *FieldQuery) Code { return c } type BoolCode struct { typ *runtime.Type isPtr bool } func (c *BoolCode) Kind() CodeKind { return CodeKindBool } func (c *BoolCode) ToOpcode(ctx *compileContext) Opcodes { var code *Opcode switch { case c.isPtr: code = newOpCode(ctx, c.typ, OpBoolPtr) default: code = newOpCode(ctx, c.typ, OpBool) } ctx.incIndex() return Opcodes{code} } func (c *BoolCode) Filter(_ *FieldQuery) Code { return c } type BytesCode struct { typ *runtime.Type isPtr bool } func (c *BytesCode) Kind() CodeKind { return CodeKindBytes } func (c *BytesCode) ToOpcode(ctx *compileContext) Opcodes { var code *Opcode switch { case c.isPtr: code = newOpCode(ctx, c.typ, OpBytesPtr) default: code = newOpCode(ctx, c.typ, OpBytes) } ctx.incIndex() return Opcodes{code} } func (c *BytesCode) Filter(_ *FieldQuery) Code { return c } type SliceCode struct { typ *runtime.Type value Code } func (c *SliceCode) Kind() CodeKind { return CodeKindSlice } func (c *SliceCode) ToOpcode(ctx *compileContext) 
Opcodes { // header => opcode => elem => end // ^ | // |________| size := c.typ.Elem().Size() header := newSliceHeaderCode(ctx, c.typ) ctx.incIndex() ctx.incIndent() codes := c.value.ToOpcode(ctx) ctx.decIndent() codes.First().Flags |= IndirectFlags elemCode := newSliceElemCode(ctx, c.typ.Elem(), header, size) ctx.incIndex() end := newOpCode(ctx, c.typ, OpSliceEnd) ctx.incIndex() header.End = end header.Next = codes.First() codes.Last().Next = elemCode elemCode.Next = codes.First() elemCode.End = end return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) } func (c *SliceCode) Filter(_ *FieldQuery) Code { return c } type ArrayCode struct { typ *runtime.Type value Code } func (c *ArrayCode) Kind() CodeKind { return CodeKindArray } func (c *ArrayCode) ToOpcode(ctx *compileContext) Opcodes { // header => opcode => elem => end // ^ | // |________| elem := c.typ.Elem() alen := c.typ.Len() size := elem.Size() header := newArrayHeaderCode(ctx, c.typ, alen) ctx.incIndex() ctx.incIndent() codes := c.value.ToOpcode(ctx) ctx.decIndent() codes.First().Flags |= IndirectFlags elemCode := newArrayElemCode(ctx, elem, header, alen, size) ctx.incIndex() end := newOpCode(ctx, c.typ, OpArrayEnd) ctx.incIndex() header.End = end header.Next = codes.First() codes.Last().Next = elemCode elemCode.Next = codes.First() elemCode.End = end return Opcodes{header}.Add(codes...).Add(elemCode).Add(end) } func (c *ArrayCode) Filter(_ *FieldQuery) Code { return c } type MapCode struct { typ *runtime.Type key Code value Code } func (c *MapCode) Kind() CodeKind { return CodeKindMap } func (c *MapCode) ToOpcode(ctx *compileContext) Opcodes { // header => code => value => code => key => code => value => code => end // ^ | // |_______________________| header := newMapHeaderCode(ctx, c.typ) ctx.incIndex() keyCodes := c.key.ToOpcode(ctx) value := newMapValueCode(ctx, c.typ.Elem(), header) ctx.incIndex() ctx.incIndent() valueCodes := c.value.ToOpcode(ctx) ctx.decIndent() valueCodes.First().Flags |= 
IndirectFlags key := newMapKeyCode(ctx, c.typ.Key(), header) ctx.incIndex() end := newMapEndCode(ctx, c.typ, header) ctx.incIndex() header.Next = keyCodes.First() keyCodes.Last().Next = value value.Next = valueCodes.First() valueCodes.Last().Next = key key.Next = keyCodes.First() header.End = end key.End = end value.End = end return Opcodes{header}.Add(keyCodes...).Add(value).Add(valueCodes...).Add(key).Add(end) } func (c *MapCode) Filter(_ *FieldQuery) Code { return c } type StructCode struct { typ *runtime.Type fields []*StructFieldCode isPtr bool disableIndirectConversion bool isIndirect bool isRecursive bool } func (c *StructCode) Kind() CodeKind { return CodeKindStruct } func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) *Opcode { if isEmbeddedStruct(field) { return c.lastAnonymousFieldCode(firstField) } lastField := firstField for lastField.NextField != nil { lastField = lastField.NextField } return lastField } func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) *Opcode { // firstField is special StructHead operation for anonymous structure. // So, StructHead's next operation is truly struct head operation. 
for firstField.Op == OpStructHead || firstField.Op == OpStructField { firstField = firstField.Next } lastField := firstField for lastField.NextField != nil { lastField = lastField.NextField } return lastField } func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { // header => code => structField => code => end // ^ | // |__________| if c.isRecursive { recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) recursive.Type = c.typ ctx.incIndex() *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) return Opcodes{recursive} } codes := Opcodes{} var prevField *Opcode ctx.incIndent() for idx, field := range c.fields { isFirstField := idx == 0 isEndField := idx == len(c.fields)-1 fieldCodes := field.ToOpcode(ctx, isFirstField, isEndField) for _, code := range fieldCodes { if c.isIndirect { code.Flags |= IndirectFlags } } firstField := fieldCodes.First() if len(codes) > 0 { codes.Last().Next = firstField firstField.Idx = codes.First().Idx } if prevField != nil { prevField.NextField = firstField } if isEndField { endField := fieldCodes.Last() if len(codes) > 0 { codes.First().End = endField } else { firstField.End = endField } codes = codes.Add(fieldCodes...) break } prevField = c.lastFieldCode(field, firstField) codes = codes.Add(fieldCodes...) 
} if len(codes) == 0 { head := &Opcode{ Op: OpStructHead, Idx: opcodeOffset(ctx.ptrIndex), Type: c.typ, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } ctx.incOpcodeIndex() end := &Opcode{ Op: OpStructEnd, Idx: opcodeOffset(ctx.ptrIndex), DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } head.NextField = end head.Next = end head.End = end codes = codes.Add(head, end) ctx.incIndex() } ctx.decIndent() ctx.structTypeToCodes[uintptr(unsafe.Pointer(c.typ))] = codes return codes } func (c *StructCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { // header => code => structField => code => end // ^ | // |__________| if c.isRecursive { recursive := newRecursiveCode(ctx, c.typ, &CompiledCode{}) recursive.Type = c.typ ctx.incIndex() *ctx.recursiveCodes = append(*ctx.recursiveCodes, recursive) return Opcodes{recursive} } codes := Opcodes{} var prevField *Opcode for idx, field := range c.fields { isFirstField := idx == 0 isEndField := idx == len(c.fields)-1 fieldCodes := field.ToAnonymousOpcode(ctx, isFirstField, isEndField) for _, code := range fieldCodes { if c.isIndirect { code.Flags |= IndirectFlags } } firstField := fieldCodes.First() if len(codes) > 0 { codes.Last().Next = firstField firstField.Idx = codes.First().Idx } if prevField != nil { prevField.NextField = firstField } if isEndField { lastField := fieldCodes.Last() if len(codes) > 0 { codes.First().End = lastField } else { firstField.End = lastField } } prevField = firstField codes = codes.Add(fieldCodes...) 
} return codes } func (c *StructCode) removeFieldsByTags(tags runtime.StructTags) { fields := make([]*StructFieldCode, 0, len(c.fields)) for _, field := range c.fields { if field.isAnonymous { structCode := field.getAnonymousStruct() if structCode != nil && !structCode.isRecursive { structCode.removeFieldsByTags(tags) if len(structCode.fields) > 0 { fields = append(fields, field) } continue } } if tags.ExistsKey(field.key) { continue } fields = append(fields, field) } c.fields = fields } func (c *StructCode) enableIndirect() { if c.isIndirect { return } c.isIndirect = true if len(c.fields) == 0 { return } structCode := c.fields[0].getStruct() if structCode == nil { return } structCode.enableIndirect() } func (c *StructCode) Filter(query *FieldQuery) Code { fieldMap := map[string]*FieldQuery{} for _, field := range query.Fields { fieldMap[field.Name] = field } fields := make([]*StructFieldCode, 0, len(c.fields)) for _, field := range c.fields { query, exists := fieldMap[field.key] if !exists { continue } fieldCode := &StructFieldCode{ typ: field.typ, key: field.key, tag: field.tag, value: field.value, offset: field.offset, isAnonymous: field.isAnonymous, isTaggedKey: field.isTaggedKey, isNilableType: field.isNilableType, isNilCheck: field.isNilCheck, isAddrForMarshaler: field.isAddrForMarshaler, isNextOpPtrType: field.isNextOpPtrType, } if len(query.Fields) > 0 { fieldCode.value = fieldCode.value.Filter(query) } fields = append(fields, fieldCode) } return &StructCode{ typ: c.typ, fields: fields, isPtr: c.isPtr, disableIndirectConversion: c.disableIndirectConversion, isIndirect: c.isIndirect, isRecursive: c.isRecursive, } } type StructFieldCode struct { typ *runtime.Type key string tag *runtime.StructTag value Code offset uintptr isAnonymous bool isTaggedKey bool isNilableType bool isNilCheck bool isAddrForMarshaler bool isNextOpPtrType bool isMarshalerContext bool } func (c *StructFieldCode) getStruct() *StructCode { value := c.value ptr, ok := value.(*PtrCode) if 
ok { value = ptr.value } structCode, ok := value.(*StructCode) if ok { return structCode } return nil } func (c *StructFieldCode) getAnonymousStruct() *StructCode { if !c.isAnonymous { return nil } return c.getStruct() } func optimizeStructHeader(code *Opcode, tag *runtime.StructTag) OpType { headType := code.ToHeaderType(tag.IsString) if tag.IsOmitEmpty { headType = headType.HeadToOmitEmptyHead() } return headType } func optimizeStructField(code *Opcode, tag *runtime.StructTag) OpType { fieldType := code.ToFieldType(tag.IsString) if tag.IsOmitEmpty { fieldType = fieldType.FieldToOmitEmptyField() } return fieldType } func (c *StructFieldCode) headerOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { value := valueCodes.First() op := optimizeStructHeader(value, c.tag) field.Op = op if value.Flags&MarshalerContextFlags != 0 { field.Flags |= MarshalerContextFlags } field.NumBitSize = value.NumBitSize field.PtrNum = value.PtrNum field.FieldQuery = value.FieldQuery fieldCodes := Opcodes{field} if op.IsMultipleOpHead() { field.Next = value fieldCodes = fieldCodes.Add(valueCodes...) } else { ctx.decIndex() } return fieldCodes } func (c *StructFieldCode) fieldOpcodes(ctx *compileContext, field *Opcode, valueCodes Opcodes) Opcodes { value := valueCodes.First() op := optimizeStructField(value, c.tag) field.Op = op if value.Flags&MarshalerContextFlags != 0 { field.Flags |= MarshalerContextFlags } field.NumBitSize = value.NumBitSize field.PtrNum = value.PtrNum field.FieldQuery = value.FieldQuery fieldCodes := Opcodes{field} if op.IsMultipleOpField() { field.Next = value fieldCodes = fieldCodes.Add(valueCodes...) 
} else { ctx.decIndex() } return fieldCodes } func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) Opcodes { end := &Opcode{ Op: OpStructEnd, Idx: opcodeOffset(ctx.ptrIndex), DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, } codes.Last().Next = end code := codes.First() for code.Op == OpStructField || code.Op == OpStructHead { code = code.Next } for code.NextField != nil { code = code.NextField } code.NextField = end codes = codes.Add(end) ctx.incOpcodeIndex() return codes } func (c *StructFieldCode) structKey(ctx *compileContext) string { if ctx.escapeKey { rctx := &RuntimeContext{Option: &Option{Flag: HTMLEscapeOption}} return fmt.Sprintf(`%s:`, string(AppendString(rctx, []byte{}, c.key))) } return fmt.Sprintf(`"%s":`, c.key) } func (c *StructFieldCode) flags() OpFlags { var flags OpFlags if c.isTaggedKey { flags |= IsTaggedKeyFlags } if c.isNilableType { flags |= IsNilableTypeFlags } if c.isNilCheck { flags |= NilCheckFlags } if c.isAddrForMarshaler { flags |= AddrForMarshalerFlags } if c.isNextOpPtrType { flags |= IsNextOpPtrTypeFlags } if c.isAnonymous { flags |= AnonymousKeyFlags } if c.isMarshalerContext { flags |= MarshalerContextFlags } return flags } func (c *StructFieldCode) toValueOpcodes(ctx *compileContext) Opcodes { if c.isAnonymous { anonymCode, ok := c.value.(AnonymousCode) if ok { return anonymCode.ToAnonymousOpcode(ctx) } } return c.value.ToOpcode(ctx) } func (c *StructFieldCode) ToOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { field := &Opcode{ Idx: opcodeOffset(ctx.ptrIndex), Flags: c.flags(), Key: c.structKey(ctx), Offset: uint32(c.offset), Type: c.typ, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, DisplayKey: c.key, } ctx.incIndex() valueCodes := c.toValueOpcodes(ctx) if isFirstField { codes := c.headerOpcodes(ctx, field, valueCodes) if isEndField { codes = c.addStructEndCode(ctx, codes) } return codes } codes := c.fieldOpcodes(ctx, field, valueCodes) if isEndField { if 
isEnableStructEndOptimization(c.value) { field.Op = field.Op.FieldToEnd() } else { codes = c.addStructEndCode(ctx, codes) } } return codes } func (c *StructFieldCode) ToAnonymousOpcode(ctx *compileContext, isFirstField, isEndField bool) Opcodes { field := &Opcode{ Idx: opcodeOffset(ctx.ptrIndex), Flags: c.flags() | AnonymousHeadFlags, Key: c.structKey(ctx), Offset: uint32(c.offset), Type: c.typ, DisplayIdx: ctx.opcodeIndex, Indent: ctx.indent, DisplayKey: c.key, } ctx.incIndex() valueCodes := c.toValueOpcodes(ctx) if isFirstField { return c.headerOpcodes(ctx, field, valueCodes) } return c.fieldOpcodes(ctx, field, valueCodes) } func isEnableStructEndOptimization(value Code) bool { switch value.Kind() { case CodeKindInt, CodeKindUint, CodeKindFloat, CodeKindString, CodeKindBool, CodeKindBytes: return true case CodeKindPtr: return isEnableStructEndOptimization(value.(*PtrCode).value) default: return false } } type InterfaceCode struct { typ *runtime.Type fieldQuery *FieldQuery isPtr bool } func (c *InterfaceCode) Kind() CodeKind { return CodeKindInterface } func (c *InterfaceCode) ToOpcode(ctx *compileContext) Opcodes { var code *Opcode switch { case c.isPtr: code = newOpCode(ctx, c.typ, OpInterfacePtr) default: code = newOpCode(ctx, c.typ, OpInterface) } code.FieldQuery = c.fieldQuery if c.typ.NumMethod() > 0 { code.Flags |= NonEmptyInterfaceFlags } ctx.incIndex() return Opcodes{code} } func (c *InterfaceCode) Filter(query *FieldQuery) Code { return &InterfaceCode{ typ: c.typ, fieldQuery: query, isPtr: c.isPtr, } } type MarshalJSONCode struct { typ *runtime.Type fieldQuery *FieldQuery isAddrForMarshaler bool isNilableType bool isMarshalerContext bool } func (c *MarshalJSONCode) Kind() CodeKind { return CodeKindMarshalJSON } func (c *MarshalJSONCode) ToOpcode(ctx *compileContext) Opcodes { code := newOpCode(ctx, c.typ, OpMarshalJSON) code.FieldQuery = c.fieldQuery if c.isAddrForMarshaler { code.Flags |= AddrForMarshalerFlags } if c.isMarshalerContext { code.Flags |= 
MarshalerContextFlags } if c.isNilableType { code.Flags |= IsNilableTypeFlags } else { code.Flags &= ^IsNilableTypeFlags } ctx.incIndex() return Opcodes{code} } func (c *MarshalJSONCode) Filter(query *FieldQuery) Code { return &MarshalJSONCode{ typ: c.typ, fieldQuery: query, isAddrForMarshaler: c.isAddrForMarshaler, isNilableType: c.isNilableType, isMarshalerContext: c.isMarshalerContext, } } type MarshalTextCode struct { typ *runtime.Type fieldQuery *FieldQuery isAddrForMarshaler bool isNilableType bool } func (c *MarshalTextCode) Kind() CodeKind { return CodeKindMarshalText } func (c *MarshalTextCode) ToOpcode(ctx *compileContext) Opcodes { code := newOpCode(ctx, c.typ, OpMarshalText) code.FieldQuery = c.fieldQuery if c.isAddrForMarshaler { code.Flags |= AddrForMarshalerFlags } if c.isNilableType { code.Flags |= IsNilableTypeFlags } else { code.Flags &= ^IsNilableTypeFlags } ctx.incIndex() return Opcodes{code} } func (c *MarshalTextCode) Filter(query *FieldQuery) Code { return &MarshalTextCode{ typ: c.typ, fieldQuery: query, isAddrForMarshaler: c.isAddrForMarshaler, isNilableType: c.isNilableType, } } type PtrCode struct { typ *runtime.Type value Code ptrNum uint8 } func (c *PtrCode) Kind() CodeKind { return CodeKindPtr } func (c *PtrCode) ToOpcode(ctx *compileContext) Opcodes { codes := c.value.ToOpcode(ctx) codes.First().Op = convertPtrOp(codes.First()) codes.First().PtrNum = c.ptrNum return codes } func (c *PtrCode) ToAnonymousOpcode(ctx *compileContext) Opcodes { var codes Opcodes anonymCode, ok := c.value.(AnonymousCode) if ok { codes = anonymCode.ToAnonymousOpcode(ctx) } else { codes = c.value.ToOpcode(ctx) } codes.First().Op = convertPtrOp(codes.First()) codes.First().PtrNum = c.ptrNum return codes } func (c *PtrCode) Filter(query *FieldQuery) Code { return &PtrCode{ typ: c.typ, value: c.value.Filter(query), ptrNum: c.ptrNum, } } func convertPtrOp(code *Opcode) OpType { ptrHeadOp := code.Op.HeadToPtrHead() if code.Op != ptrHeadOp { if code.PtrNum > 0 { // 
ptr field and ptr head code.PtrNum-- } return ptrHeadOp } switch code.Op { case OpInt: return OpIntPtr case OpUint: return OpUintPtr case OpFloat32: return OpFloat32Ptr case OpFloat64: return OpFloat64Ptr case OpString: return OpStringPtr case OpBool: return OpBoolPtr case OpBytes: return OpBytesPtr case OpNumber: return OpNumberPtr case OpArray: return OpArrayPtr case OpSlice: return OpSlicePtr case OpMap: return OpMapPtr case OpMarshalJSON: return OpMarshalJSONPtr case OpMarshalText: return OpMarshalTextPtr case OpInterface: return OpInterfacePtr case OpRecursive: return OpRecursivePtr } return code.Op } func isEmbeddedStruct(field *StructFieldCode) bool { if !field.isAnonymous { return false } t := field.typ if t.Kind() == reflect.Ptr { t = t.Elem() } return t.Kind() == reflect.Struct }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/string.go
vendor/github.com/goccy/go-json/internal/encoder/string.go
// This files's string processing codes are inspired by https://github.com/segmentio/encoding. // The license notation is as follows. // // # MIT License // // Copyright (c) 2019 Segment.io, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
package encoder

import (
	"math/bits"
	"reflect"
	"unsafe"
)

const (
	// lsb has the lowest bit of every byte lane set; multiplying it by a byte
	// value broadcasts that byte across all eight lanes of a uint64.
	lsb = 0x0101010101010101
	// msb has the highest bit of every byte lane set; the SWAR expressions
	// below arrange for a lane's MSB to be set iff that byte needs escaping.
	msb = 0x8080808080808080
)

// hex is the lowercase digit set used when emitting \u00XX escapes.
var hex = "0123456789abcdef"

// stringToUint64Slice reinterprets the string's backing bytes as a []uint64 so
// the scanners below can inspect eight bytes per iteration. The length is
// truncated to len(s)/8; callers scan the remaining tail bytes one at a time.
// The caller must keep s alive while the returned slice is in use.
//
//nolint:govet
func stringToUint64Slice(s string) []uint64 {
	return *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{
		Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
		Len:  len(s) / 8,
		Cap:  len(s) / 8,
	}))
}

// AppendString appends s to buf as a double-quoted JSON string, dispatching to
// one of four specialized encoders selected by the HTMLEscapeOption and
// NormalizeUTF8Option flags carried by ctx.
func AppendString(ctx *RuntimeContext, buf []byte, s string) []byte {
	if ctx.Option.Flag&HTMLEscapeOption != 0 {
		if ctx.Option.Flag&NormalizeUTF8Option != 0 {
			return appendNormalizedHTMLString(buf, s)
		}
		return appendHTMLString(buf, s)
	}
	if ctx.Option.Flag&NormalizeUTF8Option != 0 {
		return appendNormalizedString(buf, s)
	}
	return appendString(buf, s)
}

// appendNormalizedHTMLString escapes control bytes, quote/backslash, the HTML
// characters <, >, &, and normalizes invalid UTF-8 to \ufffd (plus the JSONP
// hazards U+2028/U+2029). Strategy: a fast 8-bytes-at-a-time scan finds the
// first suspicious byte, then a byte/rune loop copies runs [i:j) of clean
// bytes and emits escapes.
func appendNormalizedHTMLString(buf []byte, s string) []byte {
	valLen := len(s)
	if valLen == 0 {
		return append(buf, `""`...)
	}
	buf = append(buf, '"')
	var (
		i, j int
	)
	if valLen >= 8 {
		chunks := stringToUint64Slice(s)
		for _, n := range chunks {
			// combine masks before checking for the MSB of each byte. We include
			// `n` in the mask to check whether any of the *input* byte MSBs were
			// set (i.e. the byte was outside the ASCII range).
			// Per lane: (n - lsb*0x20) flags bytes < 0x20; each
			// ((n ^ lsb*c) - lsb) term flags bytes equal to c.
			mask := n | (n - (lsb * 0x20)) |
				((n ^ (lsb * '"')) - lsb) |
				((n ^ (lsb * '\\')) - lsb) |
				((n ^ (lsb * '<')) - lsb) |
				((n ^ (lsb * '>')) - lsb) |
				((n ^ (lsb * '&')) - lsb)
			if (mask & msb) != 0 {
				// NOTE: j is the offset within this chunk, not the absolute
				// index — it is only a lower-bound hint; the loop below
				// re-scans forward from j, which is always <= the first byte
				// that actually needs escaping.
				j = bits.TrailingZeros64(mask&msb) / 8
				goto ESCAPE_END
			}
		}
		// Scan the <8-byte tail not covered by the uint64 chunks.
		for i := len(chunks) * 8; i < valLen; i++ {
			if needEscapeHTMLNormalizeUTF8[s[i]] {
				j = i
				goto ESCAPE_END
			}
		}
		// did not find any escape characters.
		return append(append(buf, s...), '"')
	}
ESCAPE_END:
	for j < valLen {
		c := s[j]
		if !needEscapeHTMLNormalizeUTF8[c] {
			// fast path: most of the time, printable ascii characters are used
			j++
			continue
		}
		switch c {
		case '\\', '"':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', c)
			i = j + 1
			j = j + 1
			continue
		case '\n':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'n')
			i = j + 1
			j = j + 1
			continue
		case '\r':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'r')
			i = j + 1
			j = j + 1
			continue
		case '\t':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 't')
			i = j + 1
			j = j + 1
			continue
		case '<', '>', '&':
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		}
		// Non-ASCII byte: decode the rune to detect invalid UTF-8 and the
		// U+2028/U+2029 separators.
		state, size := decodeRuneInString(s[j:])
		switch state {
		case runeErrorState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\ufffd`...)
			i = j + 1
			j = j + 1
			continue
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		case lineSepState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u2028`...)
			i = j + 3
			j = j + 3
			continue
		case paragraphSepState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u2029`...)
			i = j + 3
			j = j + 3
			continue
		}
		j += size
	}
	return append(append(buf, s[i:]...), '"')
}

// appendHTMLString is like appendNormalizedHTMLString but without UTF-8
// normalization: bytes >= 0x80 are copied through verbatim, so only
// control/quote/backslash/HTML bytes are escaped.
func appendHTMLString(buf []byte, s string) []byte {
	valLen := len(s)
	if valLen == 0 {
		return append(buf, `""`...)
	}
	buf = append(buf, '"')
	var (
		i, j int
	)
	if valLen >= 8 {
		chunks := stringToUint64Slice(s)
		for _, n := range chunks {
			// combine masks before checking for the MSB of each byte. We include
			// `n` in the mask to check whether any of the *input* byte MSBs were
			// set (i.e. the byte was outside the ASCII range).
			mask := n | (n - (lsb * 0x20)) |
				((n ^ (lsb * '"')) - lsb) |
				((n ^ (lsb * '\\')) - lsb) |
				((n ^ (lsb * '<')) - lsb) |
				((n ^ (lsb * '>')) - lsb) |
				((n ^ (lsb * '&')) - lsb)
			if (mask & msb) != 0 {
				// j is an in-chunk offset: a lower-bound hint re-scanned below.
				j = bits.TrailingZeros64(mask&msb) / 8
				goto ESCAPE_END
			}
		}
		for i := len(chunks) * 8; i < valLen; i++ {
			if needEscapeHTML[s[i]] {
				j = i
				goto ESCAPE_END
			}
		}
		// did not find any escape characters.
		return append(append(buf, s...), '"')
	}
ESCAPE_END:
	for j < valLen {
		c := s[j]
		if !needEscapeHTML[c] {
			// fast path: most of the time, printable ascii characters are used
			j++
			continue
		}
		switch c {
		case '\\', '"':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', c)
			i = j + 1
			j = j + 1
			continue
		case '\n':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'n')
			i = j + 1
			j = j + 1
			continue
		case '\r':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'r')
			i = j + 1
			j = j + 1
			continue
		case '\t':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 't')
			i = j + 1
			j = j + 1
			continue
		case '<', '>', '&':
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		}
		j++
	}
	return append(append(buf, s[i:]...), '"')
}

// appendNormalizedString escapes control bytes and quote/backslash and
// normalizes invalid UTF-8 / U+2028 / U+2029, but leaves HTML characters
// (<, >, &) unescaped.
func appendNormalizedString(buf []byte, s string) []byte {
	valLen := len(s)
	if valLen == 0 {
		return append(buf, `""`...)
	}
	buf = append(buf, '"')
	var (
		i, j int
	)
	if valLen >= 8 {
		chunks := stringToUint64Slice(s)
		for _, n := range chunks {
			// combine masks before checking for the MSB of each byte. We include
			// `n` in the mask to check whether any of the *input* byte MSBs were
			// set (i.e. the byte was outside the ASCII range).
			mask := n | (n - (lsb * 0x20)) |
				((n ^ (lsb * '"')) - lsb) |
				((n ^ (lsb * '\\')) - lsb)
			if (mask & msb) != 0 {
				// j is an in-chunk offset: a lower-bound hint re-scanned below.
				j = bits.TrailingZeros64(mask&msb) / 8
				goto ESCAPE_END
			}
		}
		valLen := len(s)
		for i := len(chunks) * 8; i < valLen; i++ {
			if needEscapeNormalizeUTF8[s[i]] {
				j = i
				goto ESCAPE_END
			}
		}
		return append(append(buf, s...), '"')
	}
ESCAPE_END:
	for j < valLen {
		c := s[j]
		if !needEscapeNormalizeUTF8[c] {
			// fast path: most of the time, printable ascii characters are used
			j++
			continue
		}
		switch c {
		case '\\', '"':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', c)
			i = j + 1
			j = j + 1
			continue
		case '\n':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'n')
			i = j + 1
			j = j + 1
			continue
		case '\r':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'r')
			i = j + 1
			j = j + 1
			continue
		case '\t':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 't')
			i = j + 1
			j = j + 1
			continue
		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		}
		state, size := decodeRuneInString(s[j:])
		switch state {
		case runeErrorState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\ufffd`...)
			i = j + 1
			j = j + 1
			continue
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		case lineSepState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u2028`...)
			i = j + 3
			j = j + 3
			continue
		case paragraphSepState:
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u2029`...)
			i = j + 3
			j = j + 3
			continue
		}
		j += size
	}
	return append(append(buf, s[i:]...), '"')
}

// appendString is the minimal encoder: escapes only control bytes, quote and
// backslash; everything else (including non-ASCII bytes) passes through.
func appendString(buf []byte, s string) []byte {
	valLen := len(s)
	if valLen == 0 {
		return append(buf, `""`...)
	}
	buf = append(buf, '"')
	var (
		i, j int
	)
	if valLen >= 8 {
		chunks := stringToUint64Slice(s)
		for _, n := range chunks {
			// combine masks before checking for the MSB of each byte. We include
			// `n` in the mask to check whether any of the *input* byte MSBs were
			// set (i.e. the byte was outside the ASCII range).
			mask := n | (n - (lsb * 0x20)) |
				((n ^ (lsb * '"')) - lsb) |
				((n ^ (lsb * '\\')) - lsb)
			if (mask & msb) != 0 {
				// j is an in-chunk offset: a lower-bound hint re-scanned below.
				j = bits.TrailingZeros64(mask&msb) / 8
				goto ESCAPE_END
			}
		}
		valLen := len(s)
		for i := len(chunks) * 8; i < valLen; i++ {
			if needEscape[s[i]] {
				j = i
				goto ESCAPE_END
			}
		}
		return append(append(buf, s...), '"')
	}
ESCAPE_END:
	for j < valLen {
		c := s[j]
		if !needEscape[c] {
			// fast path: most of the time, printable ascii characters are used
			j++
			continue
		}
		switch c {
		case '\\', '"':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', c)
			i = j + 1
			j = j + 1
			continue
		case '\n':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'n')
			i = j + 1
			j = j + 1
			continue
		case '\r':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 'r')
			i = j + 1
			j = j + 1
			continue
		case '\t':
			buf = append(buf, s[i:j]...)
			buf = append(buf, '\\', 't')
			i = j + 1
			j = j + 1
			continue
		case 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x0B, 0x0C, 0x0E, 0x0F, // 0x00-0x0F
			0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F: // 0x10-0x1F
			buf = append(buf, s[i:j]...)
			buf = append(buf, `\u00`...)
			buf = append(buf, hex[c>>4], hex[c&0xF])
			i = j + 1
			j = j + 1
			continue
		}
		j++
	}
	return append(append(buf, s[i:]...), '"')
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/int.go
vendor/github.com/goccy/go-json/internal/encoder/int.go
// This files's processing codes are inspired by https://github.com/segmentio/encoding. // The license notation is as follows. // // # MIT License // // Copyright (c) 2019 Segment.io, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
package encoder

import (
	"unsafe"
)

// endianness selects which digit-pair lookup table to use at runtime:
// 0 = little endian, 1 = big endian (it indexes intLookup below).
var endianness int

// init probes the host byte order by storing a known 16-bit pattern into a
// two-byte array and inspecting which byte landed first in memory.
func init() {
	var b [2]byte
	*(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD)
	switch b[0] {
	case 0xCD:
		endianness = 0 // LE
	case 0xAB:
		endianness = 1 // BE
	default:
		panic("could not determine endianness")
	}
}

// "00010203...96979899" cast to []uint16
// Each entry packs the two ASCII digits of a value 0..99 into one uint16
// (little-endian byte order), so a digit pair is emitted with one 16-bit store.
var intLELookup = [100]uint16{
	0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
	0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
	0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
	0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
	0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
	0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
	0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
	0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
	0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
	0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939,
}

// intBELookup is the same digit-pair table with the bytes swapped for
// big-endian hosts.
var intBELookup = [100]uint16{
	0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039,
	0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139,
	0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239,
	0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339,
	0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439,
	0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539,
	0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639,
	0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739,
	0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839,
	0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939,
}

// intLookup maps the endianness flag to the matching digit-pair table.
var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup}

// numMask returns a mask with the low numBitSize bits set, used to truncate a
// loaded word to the integer's true width.
func numMask(numBitSize uint8) uint64 {
	return 1<<numBitSize - 1
}

// AppendInt appends the decimal representation of the signed integer stored at
// *p to out. The integer's width is taken from code.NumBitSize; the value is
// loaded with a double pointer dereference because p holds the address of the
// field's address.
func AppendInt(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte {
	var u64 uint64
	switch code.NumBitSize {
	case 8:
		u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p)))
	case 16:
		u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p)))
	case 32:
		u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p)))
	case 64:
		u64 = **(**uint64)(unsafe.Pointer(&p))
	}
	mask := numMask(code.NumBitSize)
	n := u64 & mask
	// The sign bit of the original width decides negativity.
	negative := (u64>>(code.NumBitSize-1))&1 == 1
	if !negative {
		// Fast paths for one- and two-digit non-negative values.
		if n < 10 {
			return append(out, byte(n+'0'))
		} else if n < 100 {
			u := intLELookup[n]
			return append(out, byte(u), byte(u>>8))
		}
	} else {
		// Two's-complement negate within the value's width to get |n|.
		n = -n & mask
	}
	lookup := intLookup[endianness]
	// b is written back-to-front as uint16 digit pairs via the aliased view u.
	var b [22]byte
	u := (*[11]uint16)(unsafe.Pointer(&b))
	i := 11
	for n >= 100 {
		j := n % 100
		n /= 100
		i--
		u[i] = lookup[j]
	}
	i--
	u[i] = lookup[n]
	i *= 2 // convert to byte index
	if n < 10 {
		i++ // remove leading zero
	}
	if negative {
		i--
		b[i] = '-'
	}
	return append(out, b[i:]...)
}

// AppendUint appends the decimal representation of the unsigned integer stored
// at *p to out; same layout tricks as AppendInt, minus the sign handling.
func AppendUint(_ *RuntimeContext, out []byte, p uintptr, code *Opcode) []byte {
	var u64 uint64
	switch code.NumBitSize {
	case 8:
		u64 = (uint64)(**(**uint8)(unsafe.Pointer(&p)))
	case 16:
		u64 = (uint64)(**(**uint16)(unsafe.Pointer(&p)))
	case 32:
		u64 = (uint64)(**(**uint32)(unsafe.Pointer(&p)))
	case 64:
		u64 = **(**uint64)(unsafe.Pointer(&p))
	}
	mask := numMask(code.NumBitSize)
	n := u64 & mask
	// Fast paths for one- and two-digit values.
	if n < 10 {
		return append(out, byte(n+'0'))
	} else if n < 100 {
		u := intLELookup[n]
		return append(out, byte(u), byte(u>>8))
	}
	lookup := intLookup[endianness]
	var b [22]byte
	u := (*[11]uint16)(unsafe.Pointer(&b))
	i := 11
	for n >= 100 {
		j := n % 100
		n /= 100
		i--
		u[i] = lookup[j]
	}
	i--
	u[i] = lookup[n]
	i *= 2 // convert to byte index
	if n < 10 {
		i++ // remove leading zero
	}
	return append(out, b[i:]...)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/context.go
vendor/github.com/goccy/go-json/internal/encoder/context.go
package encoder

import (
	"context"
	"sync"
	"unsafe"

	"github.com/goccy/go-json/internal/runtime"
)

// compileContext carries the mutable positional state used while the compiler
// lowers a type into an opcode sequence.
type compileContext struct {
	opcodeIndex       uint32            // sequential DisplayIdx assigned to the next opcode
	ptrIndex          int               // next slot in the runtime Ptrs array
	indent            uint32            // current indentation depth for indented output
	escapeKey         bool              // whether struct keys are pre-escaped for HTML
	structTypeToCodes map[uintptr]Opcodes
	recursiveCodes    *Opcodes
}

// incIndent/decIndent adjust the indentation depth.
func (c *compileContext) incIndent() {
	c.indent++
}

func (c *compileContext) decIndent() {
	c.indent--
}

// incIndex advances both the opcode counter and the pointer-slot counter.
func (c *compileContext) incIndex() {
	c.incOpcodeIndex()
	c.incPtrIndex()
}

// decIndex rolls both counters back by one.
func (c *compileContext) decIndex() {
	c.decOpcodeIndex()
	c.decPtrIndex()
}

func (c *compileContext) incOpcodeIndex() {
	c.opcodeIndex++
}

func (c *compileContext) decOpcodeIndex() {
	c.opcodeIndex--
}

func (c *compileContext) incPtrIndex() {
	c.ptrIndex++
}

func (c *compileContext) decPtrIndex() {
	c.ptrIndex--
}

const (
	// bufSize is the initial capacity of a pooled RuntimeContext's output buffer.
	bufSize = 1024
)

var (
	// runtimeContextPool recycles RuntimeContext values (and their buffers)
	// across encode calls to avoid per-call allocation.
	runtimeContextPool = sync.Pool{
		New: func() interface{} {
			return &RuntimeContext{
				Buf:      make([]byte, 0, bufSize),
				Ptrs:     make([]uintptr, 128),
				KeepRefs: make([]unsafe.Pointer, 0, 8),
				Option:   &Option{},
			}
		},
	}
)

// RuntimeContext is the per-encode scratch state shared by the opcode VM.
type RuntimeContext struct {
	Context    context.Context
	Buf        []byte           // output buffer
	MarshalBuf []byte           // scratch buffer for MarshalJSON results
	Ptrs       []uintptr        // per-opcode pointer slots, indexed via Opcode.Idx
	KeepRefs   []unsafe.Pointer // pins values so the GC cannot collect them mid-encode
	SeenPtr    []uintptr        // pointers seen on the current path, for cycle detection
	BaseIndent uint32
	Prefix     []byte
	IndentStr  []byte
	Option     *Option
}

// Init prepares a (possibly pooled) context for a new encode: it guarantees
// codelen pointer slots, seeds slot 0 with the root pointer p, and resets the
// per-call bookkeeping. Buf is intentionally not reset here.
func (c *RuntimeContext) Init(p uintptr, codelen int) {
	if len(c.Ptrs) < codelen {
		c.Ptrs = make([]uintptr, codelen)
	}
	c.Ptrs[0] = p
	c.KeepRefs = c.KeepRefs[:0]
	c.SeenPtr = c.SeenPtr[:0]
	c.BaseIndent = 0
}

// Ptr returns the base address of the Ptrs slice so the VM can address slots
// with raw offsets.
func (c *RuntimeContext) Ptr() uintptr {
	header := (*runtime.SliceHeader)(unsafe.Pointer(&c.Ptrs))
	return uintptr(header.Data)
}

// TakeRuntimeContext fetches a context from the pool.
func TakeRuntimeContext() *RuntimeContext {
	return runtimeContextPool.Get().(*RuntimeContext)
}

// ReleaseRuntimeContext returns a context to the pool for reuse.
func ReleaseRuntimeContext(ctx *RuntimeContext) {
	runtimeContextPool.Put(ctx)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/query.go
vendor/github.com/goccy/go-json/internal/encoder/query.go
// Marshal and Unmarshal are injected by the top-level json package at init
// time; declaring them as variables avoids an import cycle between that
// package and this internal one.
var (
	Marshal   func(interface{}) ([]byte, error)
	Unmarshal func([]byte, interface{}) error
)

// FieldQuery describes a subset of struct fields to encode: a field name
// plus, for nested structs, the sub-queries of its children.
type FieldQuery struct {
	Name   string
	Fields []*FieldQuery
	hash   string // memoized cache key computed by Hash
}

// Hash returns a stable cache key for the query (its JSON encoding),
// memoized on first use. A Marshal failure yields an empty key, which is
// treated as "recompute next time".
func (q *FieldQuery) Hash() string {
	if q.hash != "" {
		return q.hash
	}
	b, _ := Marshal(q)
	q.hash = string(b)
	return q.hash
}

// MarshalJSON renders the query in its compact wire form: a bare string for
// a leaf, {"name": [children]} for a named subtree, or a plain array at the
// root.
func (q *FieldQuery) MarshalJSON() ([]byte, error) {
	if q.Name != "" {
		if len(q.Fields) > 0 {
			return Marshal(map[string][]*FieldQuery{q.Name: q.Fields})
		}
		return Marshal(q.Name)
	}
	return Marshal(q.Fields)
}

// QueryString serializes the query back to its FieldQueryString form.
func (q *FieldQuery) QueryString() (FieldQueryString, error) {
	b, err := Marshal(q)
	if err != nil {
		return "", err
	}
	return FieldQueryString(b), nil
}

// FieldQueryString is the user-facing JSON representation of a field query,
// e.g. `["a", {"b": ["c"]}]`.
type FieldQueryString string

// Build parses the query string into a FieldQuery tree.
func (s FieldQueryString) Build() (*FieldQuery, error) {
	var query interface{}
	if err := Unmarshal([]byte(s), &query); err != nil {
		return nil, err
	}
	return s.build(reflect.ValueOf(query))
}

// build dispatches on the decoded value's kind: strings become leaves, maps
// become named subtrees, slices become field lists.
func (s FieldQueryString) build(v reflect.Value) (*FieldQuery, error) {
	switch v.Type().Kind() {
	case reflect.String:
		return s.buildString(v)
	case reflect.Map:
		return s.buildMap(v)
	case reflect.Slice:
		return s.buildSlice(v)
	case reflect.Interface:
		// Unwrap the interface and re-dispatch on the dynamic value.
		return s.build(reflect.ValueOf(v.Interface()))
	}
	return nil, fmt.Errorf("failed to build field query")
}

// buildString turns a decoded string into a leaf query. A string that itself
// looks like JSON ('[' or '{' prefix) is decoded again and treated as a
// nested query.
func (s FieldQueryString) buildString(v reflect.Value) (*FieldQuery, error) {
	b := []byte(v.String())
	if len(b) == 0 {
		// Guard: an empty field name would otherwise panic on b[0] below.
		return &FieldQuery{}, nil
	}
	switch b[0] {
	case '[', '{':
		var query interface{}
		if err := Unmarshal(b, &query); err != nil {
			return nil, err
		}
		if str, ok := query.(string); ok {
			return &FieldQuery{Name: str}, nil
		}
		return s.build(reflect.ValueOf(query))
	}
	return &FieldQuery{Name: string(b)}, nil
}

// buildSlice builds an unnamed query whose Fields are the element queries.
func (s FieldQueryString) buildSlice(v reflect.Value) (*FieldQuery, error) {
	fields := make([]*FieldQuery, 0, v.Len())
	for i := 0; i < v.Len(); i++ {
		def, err := s.build(v.Index(i))
		if err != nil {
			return nil, err
		}
		fields = append(fields, def)
	}
	return &FieldQuery{Fields: fields}, nil
}

// buildMap builds a named subtree from a single-key object: the key is the
// field name and the value describes its children.
func (s FieldQueryString) buildMap(v reflect.Value) (*FieldQuery, error) {
	keys := v.MapKeys()
	if len(keys) != 1 {
		return nil, fmt.Errorf("failed to build field query object")
	}
	key := keys[0]
	if key.Type().Kind() != reflect.String {
		return nil, fmt.Errorf("failed to build field query. invalid object key type")
	}
	name := key.String()
	def, err := s.build(v.MapIndex(key))
	if err != nil {
		return nil, err
	}
	return &FieldQuery{
		Name:   name,
		Fields: def.Fields,
	}, nil
}

// queryKey is the private context key under which a *FieldQuery travels.
type queryKey struct{}

// FieldQueryFromContext extracts the query stored by SetFieldQueryToContext,
// or nil when none (or a value of the wrong type) is present.
func FieldQueryFromContext(ctx context.Context) *FieldQuery {
	query := ctx.Value(queryKey{})
	if query == nil {
		return nil
	}
	q, ok := query.(*FieldQuery)
	if !ok {
		return nil
	}
	return q
}

// SetFieldQueryToContext returns a child context carrying query.
func SetFieldQueryToContext(ctx context.Context, query *FieldQuery) context.Context {
	return context.WithValue(ctx, queryKey{}, query)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/string_table.go
vendor/github.com/goccy/go-json/internal/encoder/string_table.go
// escapeTable builds a 256-entry lookup of bytes that must be escaped inside
// a JSON string: all control bytes 0x00-0x1F, every byte listed in extra,
// and — when nonASCII is true — all bytes 0x80-0xFF (used by the
// UTF-8-normalizing encoders, which must stop and inspect multi-byte
// sequences).
func escapeTable(extra string, nonASCII bool) [256]bool {
	var t [256]bool
	for i := 0; i < 0x20; i++ {
		t[i] = true
	}
	for i := 0; i < len(extra); i++ {
		t[extra[i]] = true
	}
	if nonASCII {
		for i := 0x80; i <= 0xff; i++ {
			t[i] = true
		}
	}
	return t
}

var (
	// HTML-safe + UTF-8-normalizing encoder: also escapes <, >, & and all non-ASCII.
	needEscapeHTMLNormalizeUTF8 = escapeTable(`"&<>\`, true)
	// UTF-8-normalizing encoder: quote/backslash plus all non-ASCII.
	needEscapeNormalizeUTF8 = escapeTable(`"\`, true)
	// HTML-safe encoder: quote/backslash plus <, >, &; non-ASCII passes through.
	needEscapeHTML = escapeTable(`"&<>\`, false)
	// Minimal encoder: only quote/backslash and control bytes.
	needEscape = escapeTable(`"\`, false)
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go
//go:build race
// +build race

package encoder

import (
	"sync"
)

// setsMu guards cachedOpcodeSets in the race-detector build. The non-race
// build accesses the slice without this lock; this variant exists so that
// `go test -race` sees properly synchronized reads and writes.
var setsMu sync.RWMutex

// CompileToGetCodeSet returns the (possibly cached) opcode set for the type
// identified by typeptr, applying any context field-query filtering.
func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) {
	initEncoder()
	if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr {
		// The type address falls outside the direct-index cache's range;
		// fall back to the map-based slow path.
		codeSet, err := compileToGetCodeSetSlowPath(typeptr)
		if err != nil {
			return nil, err
		}
		return getFilteredCodeSetIfNeeded(ctx, codeSet)
	}
	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
	setsMu.RLock()
	if codeSet := cachedOpcodeSets[index]; codeSet != nil {
		// Cache hit: filter (if a field query is active) while still holding
		// the read lock, then release it on both paths.
		filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
		if err != nil {
			setsMu.RUnlock()
			return nil, err
		}
		setsMu.RUnlock()
		return filtered, nil
	}
	setsMu.RUnlock()

	// Cache miss: compile outside any lock (compilation may be slow), then
	// publish the result under the write lock. Concurrent compiles of the
	// same type may race to this store; last writer wins, which is harmless
	// since the compiled sets are equivalent.
	codeSet, err := newCompiler().compile(typeptr)
	if err != nil {
		return nil, err
	}
	filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet)
	if err != nil {
		return nil, err
	}
	setsMu.Lock()
	cachedOpcodeSets[index] = codeSet
	setsMu.Unlock()
	return filtered, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/map113.go
vendor/github.com/goccy/go-json/internal/encoder/map113.go
//go:build go1.13
// +build go1.13

package encoder

import "unsafe"

// MapIterValue returns a pointer to the value of the map entry the iterator
// currently points at. It is bound via go:linkname to the unexported
// reflect.mapiterelem (the Go >= 1.13 symbol, per the build constraint above).
// NOTE(review): this depends on reflect internals and must be revalidated on
// new Go releases.
//
//go:linkname MapIterValue reflect.mapiterelem
func MapIterValue(it *mapIter) unsafe.Pointer
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/encoder.go
vendor/github.com/goccy/go-json/internal/encoder/encoder.go
package encoder import ( "bytes" "encoding" "encoding/base64" "encoding/json" "fmt" "math" "reflect" "strconv" "strings" "sync" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) func (t OpType) IsMultipleOpHead() bool { switch t { case OpStructHead: return true case OpStructHeadSlice: return true case OpStructHeadArray: return true case OpStructHeadMap: return true case OpStructHeadStruct: return true case OpStructHeadOmitEmpty: return true case OpStructHeadOmitEmptySlice: return true case OpStructHeadOmitEmptyArray: return true case OpStructHeadOmitEmptyMap: return true case OpStructHeadOmitEmptyStruct: return true case OpStructHeadSlicePtr: return true case OpStructHeadOmitEmptySlicePtr: return true case OpStructHeadArrayPtr: return true case OpStructHeadOmitEmptyArrayPtr: return true case OpStructHeadMapPtr: return true case OpStructHeadOmitEmptyMapPtr: return true } return false } func (t OpType) IsMultipleOpField() bool { switch t { case OpStructField: return true case OpStructFieldSlice: return true case OpStructFieldArray: return true case OpStructFieldMap: return true case OpStructFieldStruct: return true case OpStructFieldOmitEmpty: return true case OpStructFieldOmitEmptySlice: return true case OpStructFieldOmitEmptyArray: return true case OpStructFieldOmitEmptyMap: return true case OpStructFieldOmitEmptyStruct: return true case OpStructFieldSlicePtr: return true case OpStructFieldOmitEmptySlicePtr: return true case OpStructFieldArrayPtr: return true case OpStructFieldOmitEmptyArrayPtr: return true case OpStructFieldMapPtr: return true case OpStructFieldOmitEmptyMapPtr: return true } return false } type OpcodeSet struct { Type *runtime.Type NoescapeKeyCode *Opcode EscapeKeyCode *Opcode InterfaceNoescapeKeyCode *Opcode InterfaceEscapeKeyCode *Opcode CodeLength int EndCode *Opcode Code Code QueryCache map[string]*OpcodeSet cacheMu sync.RWMutex } func (s *OpcodeSet) getQueryCache(hash string) *OpcodeSet { 
s.cacheMu.RLock() codeSet := s.QueryCache[hash] s.cacheMu.RUnlock() return codeSet } func (s *OpcodeSet) setQueryCache(hash string, codeSet *OpcodeSet) { s.cacheMu.Lock() s.QueryCache[hash] = codeSet s.cacheMu.Unlock() } type CompiledCode struct { Code *Opcode Linked bool // whether recursive code already have linked CurLen uintptr NextLen uintptr } const StartDetectingCyclesAfter = 1000 func Load(base uintptr, idx uintptr) uintptr { addr := base + idx return **(**uintptr)(unsafe.Pointer(&addr)) } func Store(base uintptr, idx uintptr, p uintptr) { addr := base + idx **(**uintptr)(unsafe.Pointer(&addr)) = p } func LoadNPtr(base uintptr, idx uintptr, ptrNum int) uintptr { addr := base + idx p := **(**uintptr)(unsafe.Pointer(&addr)) if p == 0 { return 0 } return PtrToPtr(p) /* for i := 0; i < ptrNum; i++ { if p == 0 { return p } p = PtrToPtr(p) } return p */ } func PtrToUint64(p uintptr) uint64 { return **(**uint64)(unsafe.Pointer(&p)) } func PtrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } func PtrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } func PtrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } func PtrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } func PtrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) } func PtrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } func PtrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } func PtrToPtr(p uintptr) uintptr { return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) } func PtrToNPtr(p uintptr, ptrNum int) uintptr { for i := 0; i < ptrNum; i++ { if p == 0 { return 0 } p = PtrToPtr(p) } return p } func PtrToUnsafePtr(p uintptr) unsafe.Pointer { return *(*unsafe.Pointer)(unsafe.Pointer(&p)) } func PtrToInterface(code *Opcode, p uintptr) interface{} { return *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: code.Type, ptr: 
*(*unsafe.Pointer)(unsafe.Pointer(&p)), })) } func ErrUnsupportedValue(code *Opcode, ptr uintptr) *errors.UnsupportedValueError { v := *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: code.Type, ptr: *(*unsafe.Pointer)(unsafe.Pointer(&ptr)), })) return &errors.UnsupportedValueError{ Value: reflect.ValueOf(v), Str: fmt.Sprintf("encountered a cycle via %s", code.Type), } } func ErrUnsupportedFloat(v float64) *errors.UnsupportedValueError { return &errors.UnsupportedValueError{ Value: reflect.ValueOf(v), Str: strconv.FormatFloat(v, 'g', -1, 64), } } func ErrMarshalerWithCode(code *Opcode, err error) *errors.MarshalerError { return &errors.MarshalerError{ Type: runtime.RType2Type(code.Type), Err: err, } } type emptyInterface struct { typ *runtime.Type ptr unsafe.Pointer } type MapItem struct { Key []byte Value []byte } type Mapslice struct { Items []MapItem } func (m *Mapslice) Len() int { return len(m.Items) } func (m *Mapslice) Less(i, j int) bool { return bytes.Compare(m.Items[i].Key, m.Items[j].Key) < 0 } func (m *Mapslice) Swap(i, j int) { m.Items[i], m.Items[j] = m.Items[j], m.Items[i] } //nolint:structcheck,unused type mapIter struct { key unsafe.Pointer elem unsafe.Pointer t unsafe.Pointer h unsafe.Pointer buckets unsafe.Pointer bptr unsafe.Pointer overflow unsafe.Pointer oldoverflow unsafe.Pointer startBucket uintptr offset uint8 wrapped bool B uint8 i uint8 bucket uintptr checkBucket uintptr } type MapContext struct { Start int First int Idx int Slice *Mapslice Buf []byte Len int Iter mapIter } var mapContextPool = sync.Pool{ New: func() interface{} { return &MapContext{ Slice: &Mapslice{}, } }, } func NewMapContext(mapLen int, unorderedMap bool) *MapContext { ctx := mapContextPool.Get().(*MapContext) if !unorderedMap { if len(ctx.Slice.Items) < mapLen { ctx.Slice.Items = make([]MapItem, mapLen) } else { ctx.Slice.Items = ctx.Slice.Items[:mapLen] } } ctx.Buf = ctx.Buf[:0] ctx.Iter = mapIter{} ctx.Idx = 0 ctx.Len = mapLen return ctx } func 
ReleaseMapContext(c *MapContext) { mapContextPool.Put(c) } //go:linkname MapIterInit runtime.mapiterinit //go:noescape func MapIterInit(mapType *runtime.Type, m unsafe.Pointer, it *mapIter) //go:linkname MapIterKey reflect.mapiterkey //go:noescape func MapIterKey(it *mapIter) unsafe.Pointer //go:linkname MapIterNext reflect.mapiternext //go:noescape func MapIterNext(it *mapIter) //go:linkname MapLen reflect.maplen //go:noescape func MapLen(m unsafe.Pointer) int func AppendByteSlice(_ *RuntimeContext, b []byte, src []byte) []byte { if src == nil { return append(b, `null`...) } encodedLen := base64.StdEncoding.EncodedLen(len(src)) b = append(b, '"') pos := len(b) remainLen := cap(b[pos:]) var buf []byte if remainLen > encodedLen { buf = b[pos : pos+encodedLen] } else { buf = make([]byte, encodedLen) } base64.StdEncoding.Encode(buf, src) return append(append(b, buf...), '"') } func AppendFloat32(_ *RuntimeContext, b []byte, v float32) []byte { f64 := float64(v) abs := math.Abs(f64) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { f32 := float32(abs) if f32 < 1e-6 || f32 >= 1e21 { fmt = 'e' } } return strconv.AppendFloat(b, f64, fmt, -1, 32) } func AppendFloat64(_ *RuntimeContext, b []byte, v float64) []byte { abs := math.Abs(v) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { if abs < 1e-6 || abs >= 1e21 { fmt = 'e' } } return strconv.AppendFloat(b, v, fmt, -1, 64) } func AppendBool(_ *RuntimeContext, b []byte, v bool) []byte { if v { return append(b, "true"...) } return append(b, "false"...) 
} var ( floatTable = [256]bool{ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, '.': true, 'e': true, 'E': true, '+': true, '-': true, } ) func AppendNumber(_ *RuntimeContext, b []byte, n json.Number) ([]byte, error) { if len(n) == 0 { return append(b, '0'), nil } for i := 0; i < len(n); i++ { if !floatTable[n[i]] { return nil, fmt.Errorf("json: invalid number literal %q", n) } } b = append(b, n...) return b, nil } func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { rv := reflect.ValueOf(v) // convert by dynamic interface type if (code.Flags & AddrForMarshalerFlags) != 0 { if rv.CanAddr() { rv = rv.Addr() } else { newV := reflect.New(rv.Type()) newV.Elem().Set(rv) rv = newV } } if rv.Kind() == reflect.Ptr && rv.IsNil() { return AppendNull(ctx, b), nil } v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { marshaler, ok := v.(marshalerContext) if !ok { return AppendNull(ctx, b), nil } stdctx := ctx.Option.Context if ctx.Option.Flag&FieldQueryOption != 0 { stdctx = SetFieldQueryToContext(stdctx, code.FieldQuery) } b, err := marshaler.MarshalJSON(stdctx) if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } bb = b } else { marshaler, ok := v.(json.Marshaler) if !ok { return AppendNull(ctx, b), nil } b, err := marshaler.MarshalJSON() if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } bb = b } marshalBuf := ctx.MarshalBuf[:0] marshalBuf = append(append(marshalBuf, bb...), nul) compactedBuf, err := compact(b, marshalBuf, (ctx.Option.Flag&HTMLEscapeOption) != 0) if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } ctx.MarshalBuf = marshalBuf return compactedBuf, nil } func AppendMarshalJSONIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { rv := reflect.ValueOf(v) // convert by dynamic interface 
type if (code.Flags & AddrForMarshalerFlags) != 0 { if rv.CanAddr() { rv = rv.Addr() } else { newV := reflect.New(rv.Type()) newV.Elem().Set(rv) rv = newV } } v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { marshaler, ok := v.(marshalerContext) if !ok { return AppendNull(ctx, b), nil } b, err := marshaler.MarshalJSON(ctx.Option.Context) if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } bb = b } else { marshaler, ok := v.(json.Marshaler) if !ok { return AppendNull(ctx, b), nil } b, err := marshaler.MarshalJSON() if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } bb = b } marshalBuf := ctx.MarshalBuf[:0] marshalBuf = append(append(marshalBuf, bb...), nul) indentedBuf, err := doIndent( b, marshalBuf, string(ctx.Prefix)+strings.Repeat(string(ctx.IndentStr), int(ctx.BaseIndent+code.Indent)), string(ctx.IndentStr), (ctx.Option.Flag&HTMLEscapeOption) != 0, ) if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } ctx.MarshalBuf = marshalBuf return indentedBuf, nil } func AppendMarshalText(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { rv := reflect.ValueOf(v) // convert by dynamic interface type if (code.Flags & AddrForMarshalerFlags) != 0 { if rv.CanAddr() { rv = rv.Addr() } else { newV := reflect.New(rv.Type()) newV.Elem().Set(rv) rv = newV } } v = rv.Interface() marshaler, ok := v.(encoding.TextMarshaler) if !ok { return AppendNull(ctx, b), nil } bytes, err := marshaler.MarshalText() if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil } func AppendMarshalTextIndent(ctx *RuntimeContext, code *Opcode, b []byte, v interface{}) ([]byte, error) { rv := reflect.ValueOf(v) // convert by dynamic interface type if (code.Flags & AddrForMarshalerFlags) != 0 { if rv.CanAddr() { rv = rv.Addr() } else { newV := 
reflect.New(rv.Type()) newV.Elem().Set(rv) rv = newV } } v = rv.Interface() marshaler, ok := v.(encoding.TextMarshaler) if !ok { return AppendNull(ctx, b), nil } bytes, err := marshaler.MarshalText() if err != nil { return nil, &errors.MarshalerError{Type: reflect.TypeOf(v), Err: err} } return AppendString(ctx, b, *(*string)(unsafe.Pointer(&bytes))), nil } func AppendNull(_ *RuntimeContext, b []byte) []byte { return append(b, "null"...) } func AppendComma(_ *RuntimeContext, b []byte) []byte { return append(b, ',') } func AppendCommaIndent(_ *RuntimeContext, b []byte) []byte { return append(b, ',', '\n') } func AppendStructEnd(_ *RuntimeContext, b []byte) []byte { return append(b, '}', ',') } func AppendStructEndIndent(ctx *RuntimeContext, code *Opcode, b []byte) []byte { b = append(b, '\n') b = append(b, ctx.Prefix...) indentNum := ctx.BaseIndent + code.Indent - 1 for i := uint32(0); i < indentNum; i++ { b = append(b, ctx.IndentStr...) } return append(b, '}', ',', '\n') } func AppendIndent(ctx *RuntimeContext, b []byte, indent uint32) []byte { b = append(b, ctx.Prefix...) indentNum := ctx.BaseIndent + indent for i := uint32(0); i < indentNum; i++ { b = append(b, ctx.IndentStr...) } return b } func IsNilForMarshaler(v interface{}) bool { rv := reflect.ValueOf(v) switch rv.Kind() { case reflect.Bool: return !rv.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return rv.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return rv.Uint() == 0 case reflect.Float32, reflect.Float64: return math.Float64bits(rv.Float()) == 0 case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Func: return rv.IsNil() case reflect.Slice: return rv.IsNil() || rv.Len() == 0 case reflect.String: return rv.Len() == 0 } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color/hack.go
package vm_color import ( // HACK: compile order // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, // so forcibly make dependencies and avoid compiling in concurrent. // dependency order: vm => vm_indent => vm_color => vm_color_indent _ "github.com/goccy/go-json/internal/encoder/vm_color_indent" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color/vm.go
// Code generated by internal/cmd/generator. DO NOT EDIT! package vm_color import ( "math" "reflect" "sort" "unsafe" "github.com/goccy/go-json/internal/encoder" "github.com/goccy/go-json/internal/runtime" ) func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { recursiveLevel := 0 ptrOffset := uintptr(0) ctxptr := ctx.Ptr() var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } for { switch code.Op { default: return nil, errUnimplementedOp(code.Op) case encoder.OpPtr: p := load(ctxptr, code.Idx) code = code.Next store(ctxptr, code.Idx, ptrToPtr(p)) case encoder.OpIntPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInt: b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpUintPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpUint: b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpIntString: b = append(b, '"') b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpUintString: b = append(b, '"') b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpFloat32Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNull(ctx, b) b = appendComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat32: b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpFloat64Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = 
appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat64: v := ptrToFloat64(load(ctxptr, code.Idx)) if math.IsInf(v, 0) || math.IsNaN(v) { return nil, errUnsupportedFloat(v) } b = appendFloat64(ctx, b, v) b = appendComma(ctx, b) code = code.Next case encoder.OpStringPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpString: b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBoolPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBool: b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBytesPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBytes: b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpNumberPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpNumber: bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpInterfacePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInterface: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if p == seen { return nil, errUnsupportedValue(code, p) } } } ctx.SeenPtr 
= append(ctx.SeenPtr, p) var ( typ *runtime.Type ifacePtr unsafe.Pointer ) up := ptrToUnsafePtr(p) if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { iface := (*nonEmptyInterface)(up) ifacePtr = iface.ptr if iface.itab != nil { typ = iface.itab.typ } } else { iface := (*emptyInterface)(up) ifacePtr = iface.ptr typ = iface.typ } if ifacePtr == nil { isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) if !isDirectedNil { b = appendNullComma(ctx, b) code = code.Next break } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) if err != nil { return nil, err } totalLength := uintptr(code.Length) + 3 nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 var c *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { c = ifaceCodeSet.InterfaceEscapeKeyCode } else { c = ifaceCodeSet.InterfaceNoescapeKeyCode } curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += totalLength * uintptrSize oldBaseIndent := ctx.BaseIndent ctx.BaseIndent += code.Indent newLen := offsetNum + totalLength + nextTotalLength if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr end := ifaceCodeSet.EndCode store(ctxptr, c.Idx, uintptr(ifacePtr)) store(ctxptr, end.Idx, oldOffset) store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, end, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpInterfaceEnd: recursiveLevel-- // restore ctxptr offset := load(ctxptr, code.Idx) restoreIndent(ctx, code, ctxptr) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpMarshalJSONPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalJSON: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpMarshalTextPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalText: p := load(ctxptr, code.Idx) if p == 0 { b = append(b, `""`...) 
b = appendComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpSlicePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpSlice: p := load(ctxptr, code.Idx) slice := ptrToSlice(p) if p == 0 || slice.Data == nil { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.ElemIdx, 0) store(ctxptr, code.Length, uintptr(slice.Len)) store(ctxptr, code.Idx, uintptr(slice.Data)) if slice.Len > 0 { b = appendArrayHead(ctx, code, b) code = code.Next store(ctxptr, code.Idx, uintptr(slice.Data)) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpSliceElem: idx := load(ctxptr, code.ElemIdx) length := load(ctxptr, code.Length) idx++ if idx < length { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) data := load(ctxptr, code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, data+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpArrayPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpArray: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } if code.Length > 0 { b = appendArrayHead(ctx, code, b) store(ctxptr, code.ElemIdx, 0) code = code.Next store(ctxptr, code.Idx, p) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpArrayElem: idx := load(ctxptr, code.ElemIdx) idx++ if idx < uintptr(code.Length) { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) p := load(ctxptr, 
code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, p+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpMapPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpMap: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } uptr := ptrToUnsafePtr(p) mlen := maplen(uptr) if mlen <= 0 { b = appendEmptyObject(ctx, b) code = code.End.Next break } b = appendStructHead(ctx, b) unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 mapCtx := encoder.NewMapContext(mlen, unorderedMap) mapiterinit(code.Type, uptr, &mapCtx.Iter) store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) if unorderedMap { b = appendMapKeyIndent(ctx, code.Next, b) } else { mapCtx.Start = len(b) mapCtx.First = len(b) } key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next case encoder.OpMapKey: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) idx := mapCtx.Idx idx++ if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { if idx < mapCtx.Len { b = appendMapKeyIndent(ctx, code, b) mapCtx.Idx = int(idx) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { b = appendObjectEnd(ctx, code, b) encoder.ReleaseMapContext(mapCtx) code = code.End.Next } } else { mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] if idx < mapCtx.Len { mapCtx.Idx = int(idx) mapCtx.Start = len(b) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { code = code.End } } case encoder.OpMapValue: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { b = appendColon(ctx, b) } else { 
mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] mapCtx.Start = len(b) } value := mapitervalue(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(value)) mapiternext(&mapCtx.Iter) code = code.Next case encoder.OpMapEnd: // this operation only used by sorted map. mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) sort.Sort(mapCtx.Slice) buf := mapCtx.Buf for _, item := range mapCtx.Slice.Items { buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) } buf = appendMapEnd(ctx, code, buf) b = b[:mapCtx.First] b = append(b, buf...) mapCtx.Buf = buf encoder.ReleaseMapContext(mapCtx) code = code.Next case encoder.OpRecursivePtr: p := load(ctxptr, code.Idx) if p == 0 { code = code.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpRecursive: ptr := load(ctxptr, code.Idx) if ptr != 0 { if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if ptr == seen { return nil, errUnsupportedValue(code, ptr) } } } } ctx.SeenPtr = append(ctx.SeenPtr, ptr) c := code.Jmp.Code curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += code.Jmp.CurLen * uintptrSize oldBaseIndent := ctx.BaseIndent indentDiffFromTop := c.Indent - 1 ctx.BaseIndent += code.Indent - indentDiffFromTop newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr store(ctxptr, c.Idx, ptr) store(ctxptr, c.End.Next.Idx, oldOffset) store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpRecursiveEnd: recursiveLevel-- // restore ctxptr restoreIndent(ctx, code, ctxptr) offset := load(ctxptr, code.Idx) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpStructPtrHead: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHead: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if len(code.Key) > 0 { if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { b = appendStructKey(ctx, code, b) } } p += uintptr(code.Offset) code = code.Next store(ctxptr, code.Idx, p) case encoder.OpStructPtrHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += 
uintptr(code.Offset) if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { code = code.NextField } else { b = appendStructKey(ctx, code, b) code = code.Next store(ctxptr, code.Idx, p) } case encoder.OpStructPtrHeadInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, 
code.PtrNum)) } fallthrough case encoder.OpStructHeadIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += uintptr(code.Offset) u64 := ptrToUint64(p, code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), 
code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendInt(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } 
else { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) 
fallthrough case encoder.OpStructHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendUint(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { 
p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, 
code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } v := ptrToFloat32(p + uintptr(code.Offset)) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, v) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadFloat32String: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color/util.go
package vm_color import ( "encoding/json" "fmt" "unsafe" "github.com/goccy/go-json/internal/encoder" "github.com/goccy/go-json/internal/runtime" ) const uintptrSize = 4 << (^uintptr(0) >> 63) var ( errUnsupportedValue = encoder.ErrUnsupportedValue errUnsupportedFloat = encoder.ErrUnsupportedFloat mapiterinit = encoder.MapIterInit mapiterkey = encoder.MapIterKey mapitervalue = encoder.MapIterValue mapiternext = encoder.MapIterNext maplen = encoder.MapLen ) type emptyInterface struct { typ *runtime.Type ptr unsafe.Pointer } type nonEmptyInterface struct { itab *struct { ityp *runtime.Type // static interface type typ *runtime.Type // dynamic concrete type // unused fields... } ptr unsafe.Pointer } func errUnimplementedOp(op encoder.OpType) error { return fmt.Errorf("encoder: opcode %s has not been implemented", op) } func load(base uintptr, idx uint32) uintptr { addr := base + uintptr(idx) return **(**uintptr)(unsafe.Pointer(&addr)) } func store(base uintptr, idx uint32, p uintptr) { addr := base + uintptr(idx) **(**uintptr)(unsafe.Pointer(&addr)) = p } func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr { addr := base + uintptr(idx) p := **(**uintptr)(unsafe.Pointer(&addr)) for i := uint8(0); i < ptrNum; i++ { if p == 0 { return 0 } p = ptrToPtr(p) } return p } func ptrToUint64(p uintptr, bitSize uint8) uint64 { switch bitSize { case 8: return (uint64)(**(**uint8)(unsafe.Pointer(&p))) case 16: return (uint64)(**(**uint16)(unsafe.Pointer(&p))) case 32: return (uint64)(**(**uint32)(unsafe.Pointer(&p))) case 64: return **(**uint64)(unsafe.Pointer(&p)) } return 0 } func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) } func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) } func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) } func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) } func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) 
} func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) } func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) } func ptrToPtr(p uintptr) uintptr { return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p))) } func ptrToNPtr(p uintptr, ptrNum uint8) uintptr { for i := uint8(0); i < ptrNum; i++ { if p == 0 { return 0 } p = ptrToPtr(p) } return p } func ptrToUnsafePtr(p uintptr) unsafe.Pointer { return *(*unsafe.Pointer)(unsafe.Pointer(&p)) } func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} { return *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: code.Type, ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), })) } func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { format := ctx.Option.ColorScheme.Int b = append(b, format.Header...) b = encoder.AppendInt(ctx, b, p, code) return append(b, format.Footer...) } func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte { format := ctx.Option.ColorScheme.Uint b = append(b, format.Header...) b = encoder.AppendUint(ctx, b, p, code) return append(b, format.Footer...) } func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte { format := ctx.Option.ColorScheme.Float b = append(b, format.Header...) b = encoder.AppendFloat32(ctx, b, v) return append(b, format.Footer...) } func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte { format := ctx.Option.ColorScheme.Float b = append(b, format.Header...) b = encoder.AppendFloat64(ctx, b, v) return append(b, format.Footer...) } func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte { format := ctx.Option.ColorScheme.String b = append(b, format.Header...) b = encoder.AppendString(ctx, b, v) return append(b, format.Footer...) 
} func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte { format := ctx.Option.ColorScheme.Binary b = append(b, format.Header...) b = encoder.AppendByteSlice(ctx, b, src) return append(b, format.Footer...) } func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) { format := ctx.Option.ColorScheme.Int b = append(b, format.Header...) bb, err := encoder.AppendNumber(ctx, b, n) if err != nil { return nil, err } return append(bb, format.Footer...), nil } func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte { format := ctx.Option.ColorScheme.Bool b = append(b, format.Header...) if v { b = append(b, "true"...) } else { b = append(b, "false"...) } return append(b, format.Footer...) } func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte { format := ctx.Option.ColorScheme.Null b = append(b, format.Header...) b = append(b, "null"...) return append(b, format.Footer...) } func appendComma(_ *encoder.RuntimeContext, b []byte) []byte { return append(b, ',') } func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { format := ctx.Option.ColorScheme.Null b = append(b, format.Header...) b = append(b, "null"...) return append(append(b, format.Footer...), ',') } func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { last := len(b) - 1 b[last] = ':' return b } func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte { b = append(b, key[:len(key)-1]...) b = append(b, ':') return append(b, value...) 
} func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { last := len(b) - 1 b[last] = '}' b = append(b, ',') return b } func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { return encoder.AppendMarshalJSON(ctx, code, b, v) } func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) { format := ctx.Option.ColorScheme.String b = append(b, format.Header...) bb, err := encoder.AppendMarshalText(ctx, code, b, v) if err != nil { return nil, err } return append(bb, format.Footer...), nil } func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return append(b, '[') } func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { last := len(b) - 1 b[last] = ']' return append(b, ',') } func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte { return append(b, '[', ']', ',') } func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { return append(b, '{', '}', ',') } func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { last := len(b) - 1 b[last] = '}' return append(b, ',') } func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte { return append(b, '{') } func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { format := ctx.Option.ColorScheme.ObjectKey b = append(b, format.Header...) b = append(b, code.Key[:len(code.Key)-1]...) b = append(b, format.Footer...) 
return append(b, ':') } func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return append(b, '}', ',') } func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { last := len(b) - 1 if b[last] == ',' { b[last] = '}' return appendComma(ctx, b) } return appendStructEnd(ctx, code, b) } func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {} func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr) {} func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b } func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte { return b }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color/debug_vm.go
package vm_color import ( "fmt" "github.com/goccy/go-json/internal/encoder" ) func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } defer func() { if err := recover(); err != nil { w := ctx.Option.DebugOut fmt.Fprintln(w, "=============[DEBUG]===============") fmt.Fprintln(w, "* [TYPE]") fmt.Fprintln(w, codeSet.Type) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [ALL OPCODE]") fmt.Fprintln(w, code.Dump()) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [CONTEXT]") fmt.Fprintf(w, "%+v\n", ctx) fmt.Fprintln(w, "===================================") panic(err) } }() return Run(ctx, b, codeSet) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/vm.go
// Code generated by internal/cmd/generator. DO NOT EDIT! package vm_color_indent import ( "math" "reflect" "sort" "unsafe" "github.com/goccy/go-json/internal/encoder" "github.com/goccy/go-json/internal/runtime" ) func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { recursiveLevel := 0 ptrOffset := uintptr(0) ctxptr := ctx.Ptr() var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } for { switch code.Op { default: return nil, errUnimplementedOp(code.Op) case encoder.OpPtr: p := load(ctxptr, code.Idx) code = code.Next store(ctxptr, code.Idx, ptrToPtr(p)) case encoder.OpIntPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInt: b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpUintPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpUint: b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpIntString: b = append(b, '"') b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpUintString: b = append(b, '"') b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpFloat32Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNull(ctx, b) b = appendComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat32: b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpFloat64Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = 
appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat64: v := ptrToFloat64(load(ctxptr, code.Idx)) if math.IsInf(v, 0) || math.IsNaN(v) { return nil, errUnsupportedFloat(v) } b = appendFloat64(ctx, b, v) b = appendComma(ctx, b) code = code.Next case encoder.OpStringPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpString: b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBoolPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBool: b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBytesPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBytes: b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpNumberPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpNumber: bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpInterfacePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInterface: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if p == seen { return nil, errUnsupportedValue(code, p) } } } ctx.SeenPtr 
= append(ctx.SeenPtr, p) var ( typ *runtime.Type ifacePtr unsafe.Pointer ) up := ptrToUnsafePtr(p) if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { iface := (*nonEmptyInterface)(up) ifacePtr = iface.ptr if iface.itab != nil { typ = iface.itab.typ } } else { iface := (*emptyInterface)(up) ifacePtr = iface.ptr typ = iface.typ } if ifacePtr == nil { isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) if !isDirectedNil { b = appendNullComma(ctx, b) code = code.Next break } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) if err != nil { return nil, err } totalLength := uintptr(code.Length) + 3 nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 var c *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { c = ifaceCodeSet.InterfaceEscapeKeyCode } else { c = ifaceCodeSet.InterfaceNoescapeKeyCode } curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += totalLength * uintptrSize oldBaseIndent := ctx.BaseIndent ctx.BaseIndent += code.Indent newLen := offsetNum + totalLength + nextTotalLength if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr end := ifaceCodeSet.EndCode store(ctxptr, c.Idx, uintptr(ifacePtr)) store(ctxptr, end.Idx, oldOffset) store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, end, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpInterfaceEnd: recursiveLevel-- // restore ctxptr offset := load(ctxptr, code.Idx) restoreIndent(ctx, code, ctxptr) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpMarshalJSONPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalJSON: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpMarshalTextPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalText: p := load(ctxptr, code.Idx) if p == 0 { b = append(b, `""`...) 
b = appendComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpSlicePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpSlice: p := load(ctxptr, code.Idx) slice := ptrToSlice(p) if p == 0 || slice.Data == nil { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.ElemIdx, 0) store(ctxptr, code.Length, uintptr(slice.Len)) store(ctxptr, code.Idx, uintptr(slice.Data)) if slice.Len > 0 { b = appendArrayHead(ctx, code, b) code = code.Next store(ctxptr, code.Idx, uintptr(slice.Data)) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpSliceElem: idx := load(ctxptr, code.ElemIdx) length := load(ctxptr, code.Length) idx++ if idx < length { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) data := load(ctxptr, code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, data+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpArrayPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpArray: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } if code.Length > 0 { b = appendArrayHead(ctx, code, b) store(ctxptr, code.ElemIdx, 0) code = code.Next store(ctxptr, code.Idx, p) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpArrayElem: idx := load(ctxptr, code.ElemIdx) idx++ if idx < uintptr(code.Length) { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) p := load(ctxptr, 
code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, p+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpMapPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpMap: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } uptr := ptrToUnsafePtr(p) mlen := maplen(uptr) if mlen <= 0 { b = appendEmptyObject(ctx, b) code = code.End.Next break } b = appendStructHead(ctx, b) unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 mapCtx := encoder.NewMapContext(mlen, unorderedMap) mapiterinit(code.Type, uptr, &mapCtx.Iter) store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) if unorderedMap { b = appendMapKeyIndent(ctx, code.Next, b) } else { mapCtx.Start = len(b) mapCtx.First = len(b) } key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next case encoder.OpMapKey: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) idx := mapCtx.Idx idx++ if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { if idx < mapCtx.Len { b = appendMapKeyIndent(ctx, code, b) mapCtx.Idx = int(idx) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { b = appendObjectEnd(ctx, code, b) encoder.ReleaseMapContext(mapCtx) code = code.End.Next } } else { mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] if idx < mapCtx.Len { mapCtx.Idx = int(idx) mapCtx.Start = len(b) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { code = code.End } } case encoder.OpMapValue: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { b = appendColon(ctx, b) } else { 
mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] mapCtx.Start = len(b) } value := mapitervalue(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(value)) mapiternext(&mapCtx.Iter) code = code.Next case encoder.OpMapEnd: // this operation only used by sorted map. mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) sort.Sort(mapCtx.Slice) buf := mapCtx.Buf for _, item := range mapCtx.Slice.Items { buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) } buf = appendMapEnd(ctx, code, buf) b = b[:mapCtx.First] b = append(b, buf...) mapCtx.Buf = buf encoder.ReleaseMapContext(mapCtx) code = code.Next case encoder.OpRecursivePtr: p := load(ctxptr, code.Idx) if p == 0 { code = code.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpRecursive: ptr := load(ctxptr, code.Idx) if ptr != 0 { if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if ptr == seen { return nil, errUnsupportedValue(code, ptr) } } } } ctx.SeenPtr = append(ctx.SeenPtr, ptr) c := code.Jmp.Code curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += code.Jmp.CurLen * uintptrSize oldBaseIndent := ctx.BaseIndent indentDiffFromTop := c.Indent - 1 ctx.BaseIndent += code.Indent - indentDiffFromTop newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr store(ctxptr, c.Idx, ptr) store(ctxptr, c.End.Next.Idx, oldOffset) store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpRecursiveEnd: recursiveLevel-- // restore ctxptr restoreIndent(ctx, code, ctxptr) offset := load(ctxptr, code.Idx) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpStructPtrHead: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHead: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if len(code.Key) > 0 { if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { b = appendStructKey(ctx, code, b) } } p += uintptr(code.Offset) code = code.Next store(ctxptr, code.Idx, p) case encoder.OpStructPtrHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += 
uintptr(code.Offset) if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { code = code.NextField } else { b = appendStructKey(ctx, code, b) code = code.Next store(ctxptr, code.Idx, p) } case encoder.OpStructPtrHeadInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, 
code.PtrNum)) } fallthrough case encoder.OpStructHeadIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += uintptr(code.Offset) u64 := ptrToUint64(p, code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), 
code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendInt(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } 
else { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) 
fallthrough case encoder.OpStructHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendUint(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { 
p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, 
code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } v := ptrToFloat32(p + uintptr(code.Offset)) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, v) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadFloat32String: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go
// Package vm_color_indent contains helpers shared by the colored,
// indented JSON-encoding virtual machine. The functions here fall into
// two groups: unsafe pointer loads/stores against the VM's uintptr-based
// register file, and byte-slice append helpers that emit colored,
// indented JSON fragments.
package vm_color_indent

import (
	"encoding/json"
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/encoder"
	"github.com/goccy/go-json/internal/runtime"
)

// uintptrSize is the size of a pointer in bytes: 4 on 32-bit targets,
// 8 on 64-bit targets (the shift selects between 4<<0 and 4<<1).
const uintptrSize = 4 << (^uintptr(0) >> 63)

// Aliases into the encoder package so the generated VM code can refer to
// short local names.
var (
	appendIndent        = encoder.AppendIndent
	appendStructEnd     = encoder.AppendStructEndIndent
	errUnsupportedValue = encoder.ErrUnsupportedValue
	errUnsupportedFloat = encoder.ErrUnsupportedFloat
	mapiterinit         = encoder.MapIterInit
	mapiterkey          = encoder.MapIterKey
	mapitervalue        = encoder.MapIterValue
	mapiternext         = encoder.MapIterNext
	maplen              = encoder.MapLen
)

// emptyInterface mirrors the runtime layout of an interface{} value
// (type word + data word).
type emptyInterface struct {
	typ *runtime.Type
	ptr unsafe.Pointer
}

// nonEmptyInterface mirrors the runtime layout of a non-empty interface
// value (itab + data word). Only the first two itab fields are declared.
type nonEmptyInterface struct {
	itab *struct {
		ityp *runtime.Type // static interface type
		typ  *runtime.Type // dynamic concrete type
		// unused fields...
	}
	ptr unsafe.Pointer
}

// errUnimplementedOp reports an opcode this VM does not handle.
func errUnimplementedOp(op encoder.OpType) error {
	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
}

// load reads the uintptr register stored at base+idx.
// NOTE(review): idx appears to be a pre-scaled byte offset, not an
// element index — confirm against the opcode compiler.
func load(base uintptr, idx uint32) uintptr {
	addr := base + uintptr(idx)
	return **(**uintptr)(unsafe.Pointer(&addr))
}

// store writes p into the uintptr register at base+idx.
func store(base uintptr, idx uint32, p uintptr) {
	addr := base + uintptr(idx)
	**(**uintptr)(unsafe.Pointer(&addr)) = p
}

// loadNPtr loads the register at base+idx and then dereferences it
// ptrNum times, returning 0 as soon as any level is nil.
func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
	addr := base + uintptr(idx)
	p := **(**uintptr)(unsafe.Pointer(&addr))
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUint64 reads an unsigned integer of the given bit width from p,
// widened to uint64. Unknown widths yield 0.
func ptrToUint64(p uintptr, bitSize uint8) uint64 {
	switch bitSize {
	case 8:
		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
	case 16:
		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
	case 32:
		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
	case 64:
		return **(**uint64)(unsafe.Pointer(&p))
	}
	return 0
}

// ptrToFloat32 reads a float32 value stored at address p.
func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }

// ptrToFloat64 reads a float64 value stored at address p.
func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }

// ptrToBool reads a bool value stored at address p.
func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) }

// ptrToBytes reads a []byte header stored at address p.
func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) }

// ptrToNumber reads a json.Number stored at address p.
func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }

// ptrToString reads a string header stored at address p.
func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) }

// ptrToSlice reinterprets p as a pointer to a slice header.
func ptrToSlice(p uintptr) *runtime.SliceHeader {
	return *(**runtime.SliceHeader)(unsafe.Pointer(&p))
}

// ptrToPtr dereferences one level of pointer indirection at p.
func ptrToPtr(p uintptr) uintptr {
	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
}

// ptrToNPtr dereferences ptrNum levels of indirection starting at p,
// returning 0 as soon as any level is nil.
func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUnsafePtr converts the raw address p back into an unsafe.Pointer.
func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
}

// ptrToInterface boxes the value at p into an interface{} whose dynamic
// type is taken from the opcode.
func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
	return *(*interface{})(unsafe.Pointer(&emptyInterface{
		typ: code.Type,
		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
	}))
}

// appendInt appends a signed integer wrapped in the Int color escape codes.
func appendInt(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
	format := ctx.Option.ColorScheme.Int
	b = append(b, format.Header...)
	b = encoder.AppendInt(ctx, b, p, code)
	return append(b, format.Footer...)
}

// appendUint appends an unsigned integer wrapped in the Uint color escape codes.
func appendUint(ctx *encoder.RuntimeContext, b []byte, p uintptr, code *encoder.Opcode) []byte {
	format := ctx.Option.ColorScheme.Uint
	b = append(b, format.Header...)
	b = encoder.AppendUint(ctx, b, p, code)
	return append(b, format.Footer...)
}

// appendFloat32 appends a float32 wrapped in the Float color escape codes.
func appendFloat32(ctx *encoder.RuntimeContext, b []byte, v float32) []byte {
	format := ctx.Option.ColorScheme.Float
	b = append(b, format.Header...)
	b = encoder.AppendFloat32(ctx, b, v)
	return append(b, format.Footer...)
}

// appendFloat64 appends a float64 wrapped in the Float color escape codes.
func appendFloat64(ctx *encoder.RuntimeContext, b []byte, v float64) []byte {
	format := ctx.Option.ColorScheme.Float
	b = append(b, format.Header...)
	b = encoder.AppendFloat64(ctx, b, v)
	return append(b, format.Footer...)
}

// appendString appends a JSON string wrapped in the String color escape codes.
func appendString(ctx *encoder.RuntimeContext, b []byte, v string) []byte {
	format := ctx.Option.ColorScheme.String
	b = append(b, format.Header...)
	b = encoder.AppendString(ctx, b, v)
	return append(b, format.Footer...)
}

// appendByteSlice appends an encoded byte slice wrapped in the Binary
// color escape codes.
func appendByteSlice(ctx *encoder.RuntimeContext, b []byte, src []byte) []byte {
	format := ctx.Option.ColorScheme.Binary
	b = append(b, format.Header...)
	b = encoder.AppendByteSlice(ctx, b, src)
	return append(b, format.Footer...)
}

// appendNumber appends a json.Number using the Int color scheme.
// It can fail because the number text is validated on output.
func appendNumber(ctx *encoder.RuntimeContext, b []byte, n json.Number) ([]byte, error) {
	format := ctx.Option.ColorScheme.Int
	b = append(b, format.Header...)
	bb, err := encoder.AppendNumber(ctx, b, n)
	if err != nil {
		return nil, err
	}
	return append(bb, format.Footer...), nil
}

// appendBool appends "true" or "false" wrapped in the Bool color escape codes.
func appendBool(ctx *encoder.RuntimeContext, b []byte, v bool) []byte {
	format := ctx.Option.ColorScheme.Bool
	b = append(b, format.Header...)
	if v {
		b = append(b, "true"...)
	} else {
		b = append(b, "false"...)
	}
	return append(b, format.Footer...)
}

// appendNull appends "null" wrapped in the Null color escape codes.
func appendNull(ctx *encoder.RuntimeContext, b []byte) []byte {
	format := ctx.Option.ColorScheme.Null
	b = append(b, format.Header...)
	b = append(b, "null"...)
	return append(b, format.Footer...)
}

// appendComma terminates a value with ",\n" (indented output style).
func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, ',', '\n')
}

// appendNullComma appends a colored "null" followed by ",\n".
func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte {
	format := ctx.Option.ColorScheme.Null
	b = append(b, format.Header...)
	b = append(b, "null"...)
	return append(append(b, format.Footer...), ',', '\n')
}

// appendColon rewrites the trailing ",\n" left by the previous value
// into ": " so a map value can follow its key.
func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b[:len(b)-2], ':', ' ')
}

// appendMapKeyValue appends one indented "key: value" pair; the key's
// trailing two bytes are overwritten with ": ".
func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
	b = appendIndent(ctx, b, code.Indent+1)
	b = append(b, key...)
	b[len(b)-2] = ':'
	b[len(b)-1] = ' '
	return append(b, value...)
}

// appendMapEnd drops the trailing ",\n", closes the object with an
// indented '}', and re-terminates with ",\n".
func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = b[:len(b)-2]
	b = append(b, '\n')
	b = appendIndent(ctx, b, code.Indent)
	return append(b, '}', ',', '\n')
}

// appendArrayHead opens an array and indents for its first element.
func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = append(b, '[', '\n')
	return appendIndent(ctx, b, code.Indent+1)
}

// appendArrayEnd drops the trailing ",\n", closes the array with an
// indented ']', and re-terminates with ",\n".
func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = b[:len(b)-2]
	b = append(b, '\n')
	b = appendIndent(ctx, b, code.Indent)
	return append(b, ']', ',', '\n')
}

// appendEmptyArray emits "[]," plus a newline.
func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '[', ']', ',', '\n')
}

// appendEmptyObject emits "{}," plus a newline.
func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{', '}', ',', '\n')
}

// appendObjectEnd closes an object: the comma left by the last member is
// replaced with a newline, then an indented '}' and ",\n" are appended.
func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	// replace comma to newline
	b[last-1] = '\n'
	b = appendIndent(ctx, b[:last], code.Indent)
	return append(b, '}', ',', '\n')
}

// appendMarshalJSON delegates to the shared indent-aware MarshalJSON path.
func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
}

// appendMarshalText runs a TextMarshaler and wraps its output in the
// String color escape codes.
func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	format := ctx.Option.ColorScheme.String
	b = append(b, format.Header...)
	bb, err := encoder.AppendMarshalTextIndent(ctx, code, b, v)
	if err != nil {
		return nil, err
	}
	return append(bb, format.Footer...), nil
}

// appendStructHead opens a struct object.
func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{', '\n')
}

// appendStructKey appends an indented, colored field key followed by ": ".
// code.Key carries one trailing byte that is stripped here; presumably a
// ':' baked in for the compact encoder — TODO confirm.
func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = appendIndent(ctx, b, code.Indent)
	format := ctx.Option.ColorScheme.ObjectKey
	b = append(b, format.Header...)
	b = append(b, code.Key[:len(code.Key)-1]...)
	b = append(b, format.Footer...)
	return append(b, ':', ' ')
}

// appendStructEndSkipLast closes a struct whose last field may have been
// skipped (omitempty): an empty struct collapses to "{}", otherwise the
// dangling ",\n" is removed before the indented closing brace.
func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	if b[last-1] == '{' {
		b[last] = '}'
	} else {
		if b[last] == '\n' {
			// to remove ',' and '\n' characters
			b = b[:len(b)-2]
		}
		b = append(b, '\n')
		b = appendIndent(ctx, b, code.Indent-1)
		b = append(b, '}')
	}
	return appendComma(ctx, b)
}

// restoreIndent restores ctx.BaseIndent from the register saved by storeIndent.
func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) {
	ctx.BaseIndent = uint32(load(ctxptr, code.Length))
}

// storeIndent saves the current base indent into the code.Length register.
func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) {
	store(ctxptr, code.Length, indent)
}

// appendArrayElemIndent indents for the next array element.
func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	return appendIndent(ctx, b, code.Indent+1)
}

// appendMapKeyIndent indents for the next map key.
func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	return appendIndent(ctx, b, code.Indent)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/debug_vm.go
package vm_color_indent import ( "fmt" "github.com/goccy/go-json/internal/encoder" ) func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } defer func() { if err := recover(); err != nil { w := ctx.Option.DebugOut fmt.Fprintln(w, "=============[DEBUG]===============") fmt.Fprintln(w, "* [TYPE]") fmt.Fprintln(w, codeSet.Type) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [ALL OPCODE]") fmt.Fprintln(w, code.Dump()) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [CONTEXT]") fmt.Fprintf(w, "%+v\n", ctx) fmt.Fprintln(w, "===================================") panic(err) } }() return Run(ctx, b, codeSet) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go
vendor/github.com/goccy/go-json/internal/encoder/vm/hack.go
package vm import ( // HACK: compile order // `vm`, `vm_indent`, `vm_color`, `vm_color_indent` packages uses a lot of memory to compile, // so forcibly make dependencies and avoid compiling in concurrent. // dependency order: vm => vm_indent => vm_color => vm_color_indent _ "github.com/goccy/go-json/internal/encoder/vm_indent" )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm/vm.go
// Code generated by internal/cmd/generator. DO NOT EDIT! package vm import ( "math" "reflect" "sort" "unsafe" "github.com/goccy/go-json/internal/encoder" "github.com/goccy/go-json/internal/runtime" ) func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { recursiveLevel := 0 ptrOffset := uintptr(0) ctxptr := ctx.Ptr() var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } for { switch code.Op { default: return nil, errUnimplementedOp(code.Op) case encoder.OpPtr: p := load(ctxptr, code.Idx) code = code.Next store(ctxptr, code.Idx, ptrToPtr(p)) case encoder.OpIntPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInt: b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpUintPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpUint: b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpIntString: b = append(b, '"') b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpUintString: b = append(b, '"') b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpFloat32Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNull(ctx, b) b = appendComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat32: b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpFloat64Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = 
appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat64: v := ptrToFloat64(load(ctxptr, code.Idx)) if math.IsInf(v, 0) || math.IsNaN(v) { return nil, errUnsupportedFloat(v) } b = appendFloat64(ctx, b, v) b = appendComma(ctx, b) code = code.Next case encoder.OpStringPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpString: b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBoolPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBool: b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBytesPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBytes: b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpNumberPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpNumber: bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpInterfacePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInterface: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if p == seen { return nil, errUnsupportedValue(code, p) } } } ctx.SeenPtr 
= append(ctx.SeenPtr, p) var ( typ *runtime.Type ifacePtr unsafe.Pointer ) up := ptrToUnsafePtr(p) if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { iface := (*nonEmptyInterface)(up) ifacePtr = iface.ptr if iface.itab != nil { typ = iface.itab.typ } } else { iface := (*emptyInterface)(up) ifacePtr = iface.ptr typ = iface.typ } if ifacePtr == nil { isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) if !isDirectedNil { b = appendNullComma(ctx, b) code = code.Next break } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) if err != nil { return nil, err } totalLength := uintptr(code.Length) + 3 nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 var c *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { c = ifaceCodeSet.InterfaceEscapeKeyCode } else { c = ifaceCodeSet.InterfaceNoescapeKeyCode } curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += totalLength * uintptrSize oldBaseIndent := ctx.BaseIndent ctx.BaseIndent += code.Indent newLen := offsetNum + totalLength + nextTotalLength if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr end := ifaceCodeSet.EndCode store(ctxptr, c.Idx, uintptr(ifacePtr)) store(ctxptr, end.Idx, oldOffset) store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, end, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpInterfaceEnd: recursiveLevel-- // restore ctxptr offset := load(ctxptr, code.Idx) restoreIndent(ctx, code, ctxptr) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpMarshalJSONPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalJSON: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpMarshalTextPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalText: p := load(ctxptr, code.Idx) if p == 0 { b = append(b, `""`...) 
b = appendComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpSlicePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpSlice: p := load(ctxptr, code.Idx) slice := ptrToSlice(p) if p == 0 || slice.Data == nil { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.ElemIdx, 0) store(ctxptr, code.Length, uintptr(slice.Len)) store(ctxptr, code.Idx, uintptr(slice.Data)) if slice.Len > 0 { b = appendArrayHead(ctx, code, b) code = code.Next store(ctxptr, code.Idx, uintptr(slice.Data)) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpSliceElem: idx := load(ctxptr, code.ElemIdx) length := load(ctxptr, code.Length) idx++ if idx < length { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) data := load(ctxptr, code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, data+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpArrayPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpArray: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } if code.Length > 0 { b = appendArrayHead(ctx, code, b) store(ctxptr, code.ElemIdx, 0) code = code.Next store(ctxptr, code.Idx, p) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpArrayElem: idx := load(ctxptr, code.ElemIdx) idx++ if idx < uintptr(code.Length) { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) p := load(ctxptr, 
code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, p+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpMapPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpMap: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } uptr := ptrToUnsafePtr(p) mlen := maplen(uptr) if mlen <= 0 { b = appendEmptyObject(ctx, b) code = code.End.Next break } b = appendStructHead(ctx, b) unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 mapCtx := encoder.NewMapContext(mlen, unorderedMap) mapiterinit(code.Type, uptr, &mapCtx.Iter) store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) if unorderedMap { b = appendMapKeyIndent(ctx, code.Next, b) } else { mapCtx.Start = len(b) mapCtx.First = len(b) } key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next case encoder.OpMapKey: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) idx := mapCtx.Idx idx++ if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { if idx < mapCtx.Len { b = appendMapKeyIndent(ctx, code, b) mapCtx.Idx = int(idx) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { b = appendObjectEnd(ctx, code, b) encoder.ReleaseMapContext(mapCtx) code = code.End.Next } } else { mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] if idx < mapCtx.Len { mapCtx.Idx = int(idx) mapCtx.Start = len(b) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { code = code.End } } case encoder.OpMapValue: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { b = appendColon(ctx, b) } else { 
mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] mapCtx.Start = len(b) } value := mapitervalue(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(value)) mapiternext(&mapCtx.Iter) code = code.Next case encoder.OpMapEnd: // this operation only used by sorted map. mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) sort.Sort(mapCtx.Slice) buf := mapCtx.Buf for _, item := range mapCtx.Slice.Items { buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) } buf = appendMapEnd(ctx, code, buf) b = b[:mapCtx.First] b = append(b, buf...) mapCtx.Buf = buf encoder.ReleaseMapContext(mapCtx) code = code.Next case encoder.OpRecursivePtr: p := load(ctxptr, code.Idx) if p == 0 { code = code.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpRecursive: ptr := load(ctxptr, code.Idx) if ptr != 0 { if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if ptr == seen { return nil, errUnsupportedValue(code, ptr) } } } } ctx.SeenPtr = append(ctx.SeenPtr, ptr) c := code.Jmp.Code curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += code.Jmp.CurLen * uintptrSize oldBaseIndent := ctx.BaseIndent indentDiffFromTop := c.Indent - 1 ctx.BaseIndent += code.Indent - indentDiffFromTop newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr store(ctxptr, c.Idx, ptr) store(ctxptr, c.End.Next.Idx, oldOffset) store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpRecursiveEnd: recursiveLevel-- // restore ctxptr restoreIndent(ctx, code, ctxptr) offset := load(ctxptr, code.Idx) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpStructPtrHead: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHead: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if len(code.Key) > 0 { if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { b = appendStructKey(ctx, code, b) } } p += uintptr(code.Offset) code = code.Next store(ctxptr, code.Idx, p) case encoder.OpStructPtrHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += 
uintptr(code.Offset) if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { code = code.NextField } else { b = appendStructKey(ctx, code, b) code = code.Next store(ctxptr, code.Idx, p) } case encoder.OpStructPtrHeadInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, 
code.PtrNum)) } fallthrough case encoder.OpStructHeadIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += uintptr(code.Offset) u64 := ptrToUint64(p, code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), 
code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendInt(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } 
else { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) 
fallthrough case encoder.OpStructHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendUint(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { 
p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, 
code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } v := ptrToFloat32(p + uintptr(code.Offset)) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, v) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadFloat32String: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm/util.go
vendor/github.com/goccy/go-json/internal/encoder/vm/util.go
package vm

import (
	"encoding/json"
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/encoder"
	"github.com/goccy/go-json/internal/runtime"
)

// uintptrSize is the size of a uintptr in bytes: ^uintptr(0)>>63 evaluates
// to 1 on 64-bit platforms and 0 on 32-bit ones, so this is 8 or 4.
const uintptrSize = 4 << (^uintptr(0) >> 63)

// Unqualified aliases for encoder-package helpers and sentinels used
// throughout the VM loop.
var (
	appendInt       = encoder.AppendInt
	appendUint      = encoder.AppendUint
	appendFloat32   = encoder.AppendFloat32
	appendFloat64   = encoder.AppendFloat64
	appendString    = encoder.AppendString
	appendByteSlice = encoder.AppendByteSlice
	appendNumber    = encoder.AppendNumber

	errUnsupportedValue = encoder.ErrUnsupportedValue
	errUnsupportedFloat = encoder.ErrUnsupportedFloat

	mapiterinit  = encoder.MapIterInit
	mapiterkey   = encoder.MapIterKey
	mapitervalue = encoder.MapIterValue
	mapiternext  = encoder.MapIterNext
	maplen       = encoder.MapLen
)

// emptyInterface mirrors the runtime layout of an interface{} value
// (type word + data word).
type emptyInterface struct {
	typ *runtime.Type
	ptr unsafe.Pointer
}

// nonEmptyInterface mirrors the runtime layout of a non-empty interface
// value (itab pointer + data word); only the itab fields read here are
// declared.
type nonEmptyInterface struct {
	itab *struct {
		ityp *runtime.Type // static interface type
		typ  *runtime.Type // dynamic concrete type
		// unused fields...
	}
	ptr unsafe.Pointer
}

// errUnimplementedOp reports an opcode the VM's dispatch loop has no case for.
func errUnimplementedOp(op encoder.OpType) error {
	return fmt.Errorf("encoder: opcode %s has not been implemented", op)
}

// load reads the uintptr stored at address base+idx: &addr is treated as a
// **uintptr, so the double dereference yields the word located at addr.
func load(base uintptr, idx uint32) uintptr {
	addr := base + uintptr(idx)
	return **(**uintptr)(unsafe.Pointer(&addr))
}

// store writes p into the word at address base+idx (inverse of load).
func store(base uintptr, idx uint32, p uintptr) {
	addr := base + uintptr(idx)
	**(**uintptr)(unsafe.Pointer(&addr)) = p
}

// loadNPtr loads the word at base+idx and then follows ptrNum levels of
// pointer indirection, returning 0 as soon as a nil pointer is met.
func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
	addr := base + uintptr(idx)
	p := **(**uintptr)(unsafe.Pointer(&addr))
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUint64 reads an unsigned integer of the given bit width from address
// p and widens it to uint64; unsupported widths yield 0.
func ptrToUint64(p uintptr, bitSize uint8) uint64 {
	switch bitSize {
	case 8:
		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
	case 16:
		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
	case 32:
		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
	case 64:
		return **(**uint64)(unsafe.Pointer(&p))
	}
	return 0
}

// ptrToFloat32 reads the float32 stored at address p.
func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }

// ptrToFloat64 reads the float64 stored at address p.
func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }

// ptrToBool reads the bool stored at address p.
func ptrToBool(p uintptr) bool { return **(**bool)(unsafe.Pointer(&p)) }

// ptrToBytes reads the []byte value stored at address p.
func ptrToBytes(p uintptr) []byte { return **(**[]byte)(unsafe.Pointer(&p)) }

// ptrToNumber reads the json.Number stored at address p.
func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }

// ptrToString reads the string stored at address p.
func ptrToString(p uintptr) string { return **(**string)(unsafe.Pointer(&p)) }

// ptrToSlice reinterprets address p as a *runtime.SliceHeader (no copy).
func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }

// ptrToPtr dereferences the pointer stored at address p, returning its
// target address as a uintptr.
func ptrToPtr(p uintptr) uintptr {
	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
}

// ptrToNPtr follows ptrNum levels of indirection starting from p,
// returning 0 if any pointer along the chain is nil.
func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUnsafePtr converts the address p to an unsafe.Pointer.
func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
}

// ptrToInterface builds an interface{} whose dynamic type is code.Type and
// whose data word is p, by reinterpreting a hand-built emptyInterface.
func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
	return *(*interface{})(unsafe.Pointer(&emptyInterface{
		typ: code.Type,
		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
	}))
}

// appendBool appends the JSON literal "true" or "false".
func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte {
	if v {
		return append(b, "true"...)
	}
	return append(b, "false"...)
}

// appendNull appends the JSON literal "null".
func appendNull(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, "null"...)
}

// appendComma appends a single ',' separator.
func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, ',')
}

// appendNullComma appends "null," in one call.
func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, "null,"...)
}

// appendColon overwrites the last byte of b with ':' (b does not grow).
func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
	last := len(b) - 1
	b[last] = ':'
	return b
}

// appendMapKeyValue appends key, rewrites key's trailing byte to ':', then
// appends value.
func appendMapKeyValue(_ *encoder.RuntimeContext, _ *encoder.Opcode, b, key, value []byte) []byte {
	b = append(b, key...)
	b[len(b)-1] = ':'
	return append(b, value...)
}

// appendMapEnd rewrites the last byte of b to '}' and appends a ','.
func appendMapEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	b[len(b)-1] = '}'
	b = append(b, ',')
	return b
}

// appendMarshalJSON delegates to encoder.AppendMarshalJSON for values
// implementing json.Marshaler.
func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	return encoder.AppendMarshalJSON(ctx, code, b, v)
}

// appendMarshalText delegates to encoder.AppendMarshalText for values
// implementing encoding.TextMarshaler.
func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	return encoder.AppendMarshalText(ctx, code, b, v)
}

// appendArrayHead opens a JSON array.
func appendArrayHead(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	return append(b, '[')
}

// appendArrayEnd rewrites the last byte (the trailing separator) to ']'
// and appends a ','.
func appendArrayEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	b[last] = ']'
	return append(b, ',')
}

// appendEmptyArray appends "[]," for a zero-length array/slice.
func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '[', ']', ',')
}

// appendEmptyObject appends "{}," for an empty map/struct.
func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{', '}', ',')
}

// appendObjectEnd rewrites the last byte to '}' and appends a ','.
func appendObjectEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	b[last] = '}'
	return append(b, ',')
}

// appendStructHead opens a JSON object.
func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{')
}

// appendStructKey appends the precomputed key bytes stored on the opcode.
func appendStructKey(_ *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	return append(b, code.Key...)
}

// appendStructEnd appends "}," to close an object.
func appendStructEnd(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	return append(b, '}', ',')
}

// appendStructEndSkipLast closes an object, reusing a trailing ',' (rewriting
// it to '}') when present; otherwise it falls back to appendStructEnd.
func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	if b[last] == ',' {
		b[last] = '}'
		return appendComma(ctx, b)
	}
	return appendStructEnd(ctx, code, b)
}

// The following indentation hooks are no-ops in this compact (non-indent)
// VM package; the vm_indent package provides the real implementations.
func restoreIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, _ uintptr) {}
func storeIndent(_ uintptr, _ *encoder.Opcode, _ uintptr)                   {}
func appendMapKeyIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	return b
}
func appendArrayElemIndent(_ *encoder.RuntimeContext, _ *encoder.Opcode, b []byte) []byte {
	return b
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go
package vm

import (
	"fmt"
	"io"

	"github.com/goccy/go-json/internal/encoder"
)

// DebugRun executes Run and, when it panics, dumps the encoded type, the
// full opcode listing, and the runtime context to ctx.Option.DebugOut
// before re-raising the panic. Independently of panicking, when
// ctx.Option.DebugDOTOut is set, the opcode graph is written there in DOT
// format and the writer is closed and cleared.
func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
	defer func() {
		// Select the same opcode program Run used (escaped vs. plain keys).
		code := codeSet.NoescapeKeyCode
		if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 {
			code = codeSet.EscapeKeyCode
		}

		// Emit the DOT graph first; this happens on both success and panic.
		if dotOut := ctx.Option.DebugDOTOut; dotOut != nil {
			_, _ = io.WriteString(dotOut, code.DumpDOT())
			dotOut.Close()
			ctx.Option.DebugDOTOut = nil
		}

		err := recover()
		if err == nil {
			return
		}

		// A panic occurred inside Run: print diagnostics, then propagate it.
		w := ctx.Option.DebugOut
		fmt.Fprintln(w, "=============[DEBUG]===============")
		fmt.Fprintln(w, "* [TYPE]")
		fmt.Fprintln(w, codeSet.Type)
		fmt.Fprintf(w, "\n")
		fmt.Fprintln(w, "* [ALL OPCODE]")
		fmt.Fprintln(w, code.Dump())
		fmt.Fprintf(w, "\n")
		fmt.Fprintln(w, "* [CONTEXT]")
		fmt.Fprintf(w, "%+v\n", ctx)
		fmt.Fprintln(w, "===================================")
		panic(err)
	}()
	return Run(ctx, b, codeSet)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go
vendor/github.com/goccy/go-json/internal/encoder/vm_indent/hack.go
package vm_indent

import (
	// HACK: compile-order constraint.
	// The `vm`, `vm_indent`, `vm_color`, and `vm_color_indent` packages each
	// use a lot of memory to compile, so we forcibly create a dependency
	// chain to prevent them from being compiled concurrently.
	// Dependency order: vm => vm_indent => vm_color => vm_color_indent.
	_ "github.com/goccy/go-json/internal/encoder/vm_color"
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_indent/vm.go
// Code generated by internal/cmd/generator. DO NOT EDIT! package vm_indent import ( "math" "reflect" "sort" "unsafe" "github.com/goccy/go-json/internal/encoder" "github.com/goccy/go-json/internal/runtime" ) func Run(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { recursiveLevel := 0 ptrOffset := uintptr(0) ctxptr := ctx.Ptr() var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } for { switch code.Op { default: return nil, errUnimplementedOp(code.Op) case encoder.OpPtr: p := load(ctxptr, code.Idx) code = code.Next store(ctxptr, code.Idx, ptrToPtr(p)) case encoder.OpIntPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInt: b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpUintPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpUint: b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = appendComma(ctx, b) code = code.Next case encoder.OpIntString: b = append(b, '"') b = appendInt(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpUintString: b = append(b, '"') b = appendUint(ctx, b, load(ctxptr, code.Idx), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpFloat32Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNull(ctx, b) b = appendComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat32: b = appendFloat32(ctx, b, ptrToFloat32(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpFloat64Ptr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = 
appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpFloat64: v := ptrToFloat64(load(ctxptr, code.Idx)) if math.IsInf(v, 0) || math.IsNaN(v) { return nil, errUnsupportedFloat(v) } b = appendFloat64(ctx, b, v) b = appendComma(ctx, b) code = code.Next case encoder.OpStringPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpString: b = appendString(ctx, b, ptrToString(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBoolPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBool: b = appendBool(ctx, b, ptrToBool(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpBytesPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpBytes: b = appendByteSlice(ctx, b, ptrToBytes(load(ctxptr, code.Idx))) b = appendComma(ctx, b) code = code.Next case encoder.OpNumberPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpNumber: bb, err := appendNumber(ctx, b, ptrToNumber(load(ctxptr, code.Idx))) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpInterfacePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpInterface: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if p == seen { return nil, errUnsupportedValue(code, p) } } } ctx.SeenPtr 
= append(ctx.SeenPtr, p) var ( typ *runtime.Type ifacePtr unsafe.Pointer ) up := ptrToUnsafePtr(p) if code.Flags&encoder.NonEmptyInterfaceFlags != 0 { iface := (*nonEmptyInterface)(up) ifacePtr = iface.ptr if iface.itab != nil { typ = iface.itab.typ } } else { iface := (*emptyInterface)(up) ifacePtr = iface.ptr typ = iface.typ } if ifacePtr == nil { isDirectedNil := typ != nil && typ.Kind() == reflect.Struct && !runtime.IfaceIndir(typ) if !isDirectedNil { b = appendNullComma(ctx, b) code = code.Next break } } ctx.KeepRefs = append(ctx.KeepRefs, up) ifaceCodeSet, err := encoder.CompileToGetCodeSet(ctx, uintptr(unsafe.Pointer(typ))) if err != nil { return nil, err } totalLength := uintptr(code.Length) + 3 nextTotalLength := uintptr(ifaceCodeSet.CodeLength) + 3 var c *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { c = ifaceCodeSet.InterfaceEscapeKeyCode } else { c = ifaceCodeSet.InterfaceNoescapeKeyCode } curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += totalLength * uintptrSize oldBaseIndent := ctx.BaseIndent ctx.BaseIndent += code.Indent newLen := offsetNum + totalLength + nextTotalLength if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr end := ifaceCodeSet.EndCode store(ctxptr, c.Idx, uintptr(ifacePtr)) store(ctxptr, end.Idx, oldOffset) store(ctxptr, end.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, end, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpInterfaceEnd: recursiveLevel-- // restore ctxptr offset := load(ctxptr, code.Idx) restoreIndent(ctx, code, ctxptr) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpMarshalJSONPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalJSON: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalJSON(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpMarshalTextPtr: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.Next break } store(ctxptr, code.Idx, ptrToPtr(p)) fallthrough case encoder.OpMarshalText: p := load(ctxptr, code.Idx) if p == 0 { b = append(b, `""`...) 
b = appendComma(ctx, b) code = code.Next break } if (code.Flags&encoder.IsNilableTypeFlags) != 0 && (code.Flags&encoder.IndirectFlags) != 0 { p = ptrToPtr(p) } bb, err := appendMarshalText(ctx, code, b, ptrToInterface(code, p)) if err != nil { return nil, err } b = appendComma(ctx, bb) code = code.Next case encoder.OpSlicePtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpSlice: p := load(ctxptr, code.Idx) slice := ptrToSlice(p) if p == 0 || slice.Data == nil { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.ElemIdx, 0) store(ctxptr, code.Length, uintptr(slice.Len)) store(ctxptr, code.Idx, uintptr(slice.Data)) if slice.Len > 0 { b = appendArrayHead(ctx, code, b) code = code.Next store(ctxptr, code.Idx, uintptr(slice.Data)) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpSliceElem: idx := load(ctxptr, code.ElemIdx) length := load(ctxptr, code.Length) idx++ if idx < length { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) data := load(ctxptr, code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, data+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpArrayPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpArray: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } if code.Length > 0 { b = appendArrayHead(ctx, code, b) store(ctxptr, code.ElemIdx, 0) code = code.Next store(ctxptr, code.Idx, p) } else { b = appendEmptyArray(ctx, b) code = code.End.Next } case encoder.OpArrayElem: idx := load(ctxptr, code.ElemIdx) idx++ if idx < uintptr(code.Length) { b = appendArrayElemIndent(ctx, code, b) store(ctxptr, code.ElemIdx, idx) p := load(ctxptr, 
code.Idx) size := uintptr(code.Size) code = code.Next store(ctxptr, code.Idx, p+idx*size) } else { b = appendArrayEnd(ctx, code, b) code = code.End.Next } case encoder.OpMapPtr: p := loadNPtr(ctxptr, code.Idx, code.PtrNum) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } store(ctxptr, code.Idx, p) fallthrough case encoder.OpMap: p := load(ctxptr, code.Idx) if p == 0 { b = appendNullComma(ctx, b) code = code.End.Next break } uptr := ptrToUnsafePtr(p) mlen := maplen(uptr) if mlen <= 0 { b = appendEmptyObject(ctx, b) code = code.End.Next break } b = appendStructHead(ctx, b) unorderedMap := (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 mapCtx := encoder.NewMapContext(mlen, unorderedMap) mapiterinit(code.Type, uptr, &mapCtx.Iter) store(ctxptr, code.Idx, uintptr(unsafe.Pointer(mapCtx))) ctx.KeepRefs = append(ctx.KeepRefs, unsafe.Pointer(mapCtx)) if unorderedMap { b = appendMapKeyIndent(ctx, code.Next, b) } else { mapCtx.Start = len(b) mapCtx.First = len(b) } key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next case encoder.OpMapKey: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) idx := mapCtx.Idx idx++ if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { if idx < mapCtx.Len { b = appendMapKeyIndent(ctx, code, b) mapCtx.Idx = int(idx) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { b = appendObjectEnd(ctx, code, b) encoder.ReleaseMapContext(mapCtx) code = code.End.Next } } else { mapCtx.Slice.Items[mapCtx.Idx].Value = b[mapCtx.Start:len(b)] if idx < mapCtx.Len { mapCtx.Idx = int(idx) mapCtx.Start = len(b) key := mapiterkey(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(key)) code = code.Next } else { code = code.End } } case encoder.OpMapValue: mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) if (ctx.Option.Flag & encoder.UnorderedMapOption) != 0 { b = appendColon(ctx, b) } else { 
mapCtx.Slice.Items[mapCtx.Idx].Key = b[mapCtx.Start:len(b)] mapCtx.Start = len(b) } value := mapitervalue(&mapCtx.Iter) store(ctxptr, code.Next.Idx, uintptr(value)) mapiternext(&mapCtx.Iter) code = code.Next case encoder.OpMapEnd: // this operation only used by sorted map. mapCtx := (*encoder.MapContext)(ptrToUnsafePtr(load(ctxptr, code.Idx))) sort.Sort(mapCtx.Slice) buf := mapCtx.Buf for _, item := range mapCtx.Slice.Items { buf = appendMapKeyValue(ctx, code, buf, item.Key, item.Value) } buf = appendMapEnd(ctx, code, buf) b = b[:mapCtx.First] b = append(b, buf...) mapCtx.Buf = buf encoder.ReleaseMapContext(mapCtx) code = code.Next case encoder.OpRecursivePtr: p := load(ctxptr, code.Idx) if p == 0 { code = code.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpRecursive: ptr := load(ctxptr, code.Idx) if ptr != 0 { if recursiveLevel > encoder.StartDetectingCyclesAfter { for _, seen := range ctx.SeenPtr { if ptr == seen { return nil, errUnsupportedValue(code, ptr) } } } } ctx.SeenPtr = append(ctx.SeenPtr, ptr) c := code.Jmp.Code curlen := uintptr(len(ctx.Ptrs)) offsetNum := ptrOffset / uintptrSize oldOffset := ptrOffset ptrOffset += code.Jmp.CurLen * uintptrSize oldBaseIndent := ctx.BaseIndent indentDiffFromTop := c.Indent - 1 ctx.BaseIndent += code.Indent - indentDiffFromTop newLen := offsetNum + code.Jmp.CurLen + code.Jmp.NextLen if curlen < newLen { ctx.Ptrs = append(ctx.Ptrs, make([]uintptr, newLen-curlen)...) 
} ctxptr = ctx.Ptr() + ptrOffset // assign new ctxptr store(ctxptr, c.Idx, ptr) store(ctxptr, c.End.Next.Idx, oldOffset) store(ctxptr, c.End.Next.ElemIdx, uintptr(unsafe.Pointer(code.Next))) storeIndent(ctxptr, c.End.Next, uintptr(oldBaseIndent)) code = c recursiveLevel++ case encoder.OpRecursiveEnd: recursiveLevel-- // restore ctxptr restoreIndent(ctx, code, ctxptr) offset := load(ctxptr, code.Idx) ctx.SeenPtr = ctx.SeenPtr[:len(ctx.SeenPtr)-1] codePtr := load(ctxptr, code.ElemIdx) code = (*encoder.Opcode)(ptrToUnsafePtr(codePtr)) ctxptr = ctx.Ptr() + offset ptrOffset = offset case encoder.OpStructPtrHead: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHead: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if len(code.Key) > 0 { if (code.Flags&encoder.IsTaggedKeyFlags) != 0 || code.Flags&encoder.AnonymousKeyFlags == 0 { b = appendStructKey(ctx, code, b) } } p += uintptr(code.Offset) code = code.Next store(ctxptr, code.Idx, p) case encoder.OpStructPtrHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmpty: p := load(ctxptr, code.Idx) if p == 0 && ((code.Flags&encoder.IndirectFlags) != 0 || code.Next.Op == encoder.OpStructEnd) { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += 
uintptr(code.Offset) if p == 0 || (ptrToPtr(p) == 0 && (code.Flags&encoder.IsNextOpPtrTypeFlags) != 0) { code = code.NextField } else { b = appendStructKey(ctx, code, b) code = code.Next store(ctxptr, code.Idx, p) } case encoder.OpStructPtrHeadInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyInt: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyInt: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, 
code.PtrNum)) } fallthrough case encoder.OpStructHeadIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyIntString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } p += uintptr(code.Offset) u64 := ptrToUint64(p, code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), 
code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendInt(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendInt(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, 
ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyIntPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendInt(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUint: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUint: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } 
else { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintString: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyUintString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } u64 := ptrToUint64(p+uintptr(code.Offset), code.NumBitSize) v := u64 & ((1 << code.NumBitSize) - 1) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p+uintptr(code.Offset), code) b = append(b, '"') b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) 
fallthrough case encoder.OpStructHeadUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = appendUint(ctx, b, p, code) } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtr: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = appendUint(ctx, b, p, code) b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) if (code.Flags & encoder.IndirectFlags) != 0 { 
p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p == 0 { b = appendNull(ctx, b) } else { b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') } b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) fallthrough case encoder.OpStructHeadOmitEmptyUintPtrString: p := load(ctxptr, code.Idx) if p == 0 && (code.Flags&encoder.IndirectFlags) != 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } if (code.Flags & encoder.IndirectFlags) != 0 { p = ptrToNPtr(p+uintptr(code.Offset), code.PtrNum) } if p != 0 { b = appendStructKey(ctx, code, b) b = append(b, '"') b = appendUint(ctx, b, p, code) b = append(b, '"') b = appendComma(ctx, b) } code = code.Next case encoder.OpStructPtrHeadFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, ptrToFloat32(p+uintptr(code.Offset))) b = appendComma(ctx, b) code = code.Next case encoder.OpStructPtrHeadOmitEmptyFloat32: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } store(ctxptr, 
code.Idx, ptrToNPtr(p, code.PtrNum)) } fallthrough case encoder.OpStructHeadOmitEmptyFloat32: p := load(ctxptr, code.Idx) if p == 0 { if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendNullComma(ctx, b) } code = code.End.Next break } if code.Flags&encoder.AnonymousHeadFlags == 0 { b = appendStructHead(ctx, b) } v := ptrToFloat32(p + uintptr(code.Offset)) if v == 0 { code = code.NextField } else { b = appendStructKey(ctx, code, b) b = appendFloat32(ctx, b, v) b = appendComma(ctx, b) code = code.Next } case encoder.OpStructPtrHeadFloat32String: if (code.Flags & encoder.IndirectFlags) != 0 { p := load(ctxptr, code.Idx)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go
package vm_indent

import (
	"encoding/json"
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/encoder"
	"github.com/goccy/go-json/internal/runtime"
)

// uintptrSize is 4 on 32-bit platforms and 8 on 64-bit platforms
// (the shift yields 0 or 1 depending on the top bit of ^uintptr(0)).
const uintptrSize = 4 << (^uintptr(0) >> 63)

// Aliases binding the shared encoder primitives into this package so the
// indent VM body can call them unqualified. appendStructEnd/appendIndent
// deliberately point at the *Indent* variants.
var (
	appendInt           = encoder.AppendInt
	appendUint          = encoder.AppendUint
	appendFloat32       = encoder.AppendFloat32
	appendFloat64       = encoder.AppendFloat64
	appendString        = encoder.AppendString
	appendByteSlice     = encoder.AppendByteSlice
	appendNumber        = encoder.AppendNumber
	appendStructEnd     = encoder.AppendStructEndIndent
	appendIndent        = encoder.AppendIndent
	errUnsupportedValue = encoder.ErrUnsupportedValue
	errUnsupportedFloat = encoder.ErrUnsupportedFloat
	mapiterinit         = encoder.MapIterInit
	mapiterkey          = encoder.MapIterKey
	mapitervalue        = encoder.MapIterValue
	mapiternext         = encoder.MapIterNext
	maplen              = encoder.MapLen
)

// emptyInterface mirrors the runtime layout of an interface{} value
// (type word + data word); used by ptrToInterface to fabricate one.
type emptyInterface struct {
	typ *runtime.Type
	ptr unsafe.Pointer
}

// nonEmptyInterface mirrors the runtime layout of a non-empty interface
// value (itab + data word). Only the itab fields read here are declared.
type nonEmptyInterface struct {
	itab *struct {
		ityp *runtime.Type // static interface type
		typ  *runtime.Type // dynamic concrete type
		// unused fields...
	}
	ptr unsafe.Pointer
}

// errUnimplementedOp reports an opcode the indent VM cannot execute.
func errUnimplementedOp(op encoder.OpType) error {
	return fmt.Errorf("encoder (indent): opcode %s has not been implemented", op)
}

// load reads the uintptr slot at base+idx. The double indirection
// (**(**uintptr)) treats the computed address value itself as a pointer
// without letting the compiler see a pointer conversion from an integer.
func load(base uintptr, idx uint32) uintptr {
	addr := base + uintptr(idx)
	return **(**uintptr)(unsafe.Pointer(&addr))
}

// store writes p into the uintptr slot at base+idx (inverse of load).
func store(base uintptr, idx uint32, p uintptr) {
	addr := base + uintptr(idx)
	**(**uintptr)(unsafe.Pointer(&addr)) = p
}

// loadNPtr loads the slot at base+idx and then follows ptrNum levels of
// pointer indirection, returning 0 as soon as a nil pointer is hit.
func loadNPtr(base uintptr, idx uint32, ptrNum uint8) uintptr {
	addr := base + uintptr(idx)
	p := **(**uintptr)(unsafe.Pointer(&addr))
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUint64 reads an unsigned integer of the given bit width from the
// address p, widened to uint64. Unknown widths read as 0.
func ptrToUint64(p uintptr, bitSize uint8) uint64 {
	switch bitSize {
	case 8:
		return (uint64)(**(**uint8)(unsafe.Pointer(&p)))
	case 16:
		return (uint64)(**(**uint16)(unsafe.Pointer(&p)))
	case 32:
		return (uint64)(**(**uint32)(unsafe.Pointer(&p)))
	case 64:
		return **(**uint64)(unsafe.Pointer(&p))
	}
	return 0
}

// The ptrToXxx helpers below all reinterpret the address p as a pointer
// to the named type and dereference it.
func ptrToFloat32(p uintptr) float32 { return **(**float32)(unsafe.Pointer(&p)) }
func ptrToFloat64(p uintptr) float64 { return **(**float64)(unsafe.Pointer(&p)) }
func ptrToBool(p uintptr) bool       { return **(**bool)(unsafe.Pointer(&p)) }
func ptrToBytes(p uintptr) []byte    { return **(**[]byte)(unsafe.Pointer(&p)) }
func ptrToNumber(p uintptr) json.Number { return **(**json.Number)(unsafe.Pointer(&p)) }
func ptrToString(p uintptr) string   { return **(**string)(unsafe.Pointer(&p)) }

// ptrToSlice reinterprets p as a *runtime.SliceHeader (single deref:
// the header itself lives at p).
func ptrToSlice(p uintptr) *runtime.SliceHeader { return *(**runtime.SliceHeader)(unsafe.Pointer(&p)) }

// ptrToPtr dereferences one level: reads the pointer value stored at p.
func ptrToPtr(p uintptr) uintptr {
	return uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&p)))
}

// ptrToNPtr follows ptrNum levels of indirection starting from p,
// returning 0 if any intermediate pointer is nil.
func ptrToNPtr(p uintptr, ptrNum uint8) uintptr {
	for i := uint8(0); i < ptrNum; i++ {
		if p == 0 {
			return 0
		}
		p = ptrToPtr(p)
	}
	return p
}

// ptrToUnsafePtr converts the integer address p back into an
// unsafe.Pointer without dereferencing it.
func ptrToUnsafePtr(p uintptr) unsafe.Pointer {
	return *(*unsafe.Pointer)(unsafe.Pointer(&p))
}

// ptrToInterface fabricates an interface{} whose dynamic type is
// code.Type and whose data word is the address p.
func ptrToInterface(code *encoder.Opcode, p uintptr) interface{} {
	return *(*interface{})(unsafe.Pointer(&emptyInterface{
		typ: code.Type,
		ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)),
	}))
}

// appendBool writes the JSON literal for v.
func appendBool(_ *encoder.RuntimeContext, b []byte, v bool) []byte {
	if v {
		return append(b, "true"...)
	}
	return append(b, "false"...)
}

// appendNull writes the JSON null literal.
func appendNull(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, "null"...)
}

// appendComma terminates a value in indent mode: comma plus newline.
func appendComma(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, ',', '\n')
}

// appendNullComma writes "null" already terminated with ",\n".
func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, "null,\n"...)
}

// appendColon replaces the trailing ",\n" left by the previous value
// with ": " so a map key can be joined to its value.
func appendColon(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b[:len(b)-2], ':', ' ')
}

// appendMapKeyValue emits an indented key/value pair; the key's trailing
// two bytes (",\n" from its own encoding) are patched into ": " in place.
func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte {
	b = appendIndent(ctx, b, code.Indent+1)
	b = append(b, key...)
	b[len(b)-2] = ':'
	b[len(b)-1] = ' '
	return append(b, value...)
}

// appendMapEnd drops the last value's ",\n", closes the object on its
// own indented line, and re-terminates with ",\n".
func appendMapEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = b[:len(b)-2]
	b = append(b, '\n')
	b = appendIndent(ctx, b, code.Indent)
	return append(b, '}', ',', '\n')
}

// appendArrayHead opens an array and indents for the first element.
func appendArrayHead(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = append(b, '[', '\n')
	return appendIndent(ctx, b, code.Indent+1)
}

// appendArrayEnd mirrors appendMapEnd for arrays.
func appendArrayEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = b[:len(b)-2]
	b = append(b, '\n')
	b = appendIndent(ctx, b, code.Indent)
	return append(b, ']', ',', '\n')
}

// appendEmptyArray writes "[]" already terminated with ",\n".
func appendEmptyArray(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '[', ']', ',', '\n')
}

// appendEmptyObject writes "{}" already terminated with ",\n".
func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{', '}', ',', '\n')
}

// appendObjectEnd closes an object whose last field ended with ",\n":
// the comma becomes a newline, the newline is dropped, then an indented
// closing brace is appended.
func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	// replace comma to newline
	b[last-1] = '\n'
	b = appendIndent(ctx, b[:last], code.Indent)
	return append(b, '}', ',', '\n')
}

// appendMarshalJSON delegates to the indent-aware MarshalJSON path.
func appendMarshalJSON(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	return encoder.AppendMarshalJSONIndent(ctx, code, b, v)
}

// appendMarshalText delegates to the indent-aware MarshalText path.
func appendMarshalText(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte, v interface{}) ([]byte, error) {
	return encoder.AppendMarshalTextIndent(ctx, code, b, v)
}

// appendStructHead opens a struct object.
func appendStructHead(_ *encoder.RuntimeContext, b []byte) []byte {
	return append(b, '{', '\n')
}

// appendStructKey writes the indented, pre-encoded field key (code.Key)
// followed by a space separator.
func appendStructKey(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	b = appendIndent(ctx, b, code.Indent)
	b = append(b, code.Key...)
	return append(b, ' ')
}

// appendStructEndSkipLast closes a struct whose final field may have been
// skipped (omitempty): an empty "{\n" collapses to "{}", otherwise any
// trailing ",\n" is stripped before the indented closing brace.
func appendStructEndSkipLast(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	last := len(b) - 1
	if b[last-1] == '{' {
		b[last] = '}'
	} else {
		if b[last] == '\n' {
			// to remove ',' and '\n' characters
			b = b[:len(b)-2]
		}
		b = append(b, '\n')
		b = appendIndent(ctx, b, code.Indent-1)
		b = append(b, '}')
	}
	return appendComma(ctx, b)
}

// restoreIndent reloads the base indent previously saved by storeIndent
// from the VM register at code.Length.
func restoreIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, ctxptr uintptr) {
	ctx.BaseIndent = uint32(load(ctxptr, code.Length))
}

// storeIndent saves the current indent into the VM register at code.Length.
func storeIndent(ctxptr uintptr, code *encoder.Opcode, indent uintptr) {
	store(ctxptr, code.Length, indent)
}

// appendArrayElemIndent indents one level inside the current array.
func appendArrayElemIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	return appendIndent(ctx, b, code.Indent+1)
}

// appendMapKeyIndent indents at the current map's key level.
func appendMapKeyIndent(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte {
	return appendIndent(ctx, b, code.Indent)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go
vendor/github.com/goccy/go-json/internal/encoder/vm_indent/debug_vm.go
package vm_indent import ( "fmt" "github.com/goccy/go-json/internal/encoder" ) func DebugRun(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) { var code *encoder.Opcode if (ctx.Option.Flag & encoder.HTMLEscapeOption) != 0 { code = codeSet.EscapeKeyCode } else { code = codeSet.NoescapeKeyCode } defer func() { if err := recover(); err != nil { w := ctx.Option.DebugOut fmt.Fprintln(w, "=============[DEBUG]===============") fmt.Fprintln(w, "* [TYPE]") fmt.Fprintln(w, codeSet.Type) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [ALL OPCODE]") fmt.Fprintln(w, code.Dump()) fmt.Fprintf(w, "\n") fmt.Fprintln(w, "* [CONTEXT]") fmt.Fprintf(w, "%+v\n", ctx) fmt.Fprintln(w, "===================================") panic(err) } }() return Run(ctx, b, codeSet) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/ptr.go
vendor/github.com/goccy/go-json/internal/decoder/ptr.go
package decoder

import (
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/runtime"
)

// ptrDecoder decodes into a pointer field: it allocates the pointee when
// the pointer is nil and delegates the actual value decoding to dec.
type ptrDecoder struct {
	dec        Decoder       // decoder for the pointed-to value
	typ        *runtime.Type // type of the pointee, used for allocation
	structName string        // carried for error context only
	fieldName  string        // carried for error context only
}

// newPtrDecoder builds a ptrDecoder wrapping dec for pointee type typ.
func newPtrDecoder(dec Decoder, typ *runtime.Type, structName, fieldName string) *ptrDecoder {
	return &ptrDecoder{
		dec:        dec,
		typ:        typ,
		structName: structName,
		fieldName:  fieldName,
	}
}

// contentDecoder unwraps nested ptrDecoders and returns the innermost
// non-pointer decoder.
func (d *ptrDecoder) contentDecoder() Decoder {
	dec, ok := d.dec.(*ptrDecoder)
	if !ok {
		return d.dec
	}
	return dec.contentDecoder()
}

//nolint:golint
//go:linkname unsafe_New reflect.unsafe_New
// unsafe_New is linked to the runtime's allocator for a given type
// descriptor (same primitive reflect.New uses internally).
func unsafe_New(*runtime.Type) unsafe.Pointer

// UnsafeNew exposes unsafe_New to other packages in this module.
func UnsafeNew(t *runtime.Type) unsafe.Pointer {
	return unsafe_New(t)
}

// DecodeStream decodes the next stream value into the pointer at p.
// JSON null sets the pointer to nil; otherwise the pointee is allocated
// on demand and decoding is delegated to d.dec.
func (d *ptrDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	// nul means the buffer is exhausted; refill before inspecting.
	if s.skipWhiteSpace() == nul {
		s.read()
	}
	if s.char() == 'n' {
		if err := nullBytes(s); err != nil {
			return err
		}
		*(*unsafe.Pointer)(p) = nil
		return nil
	}
	var newptr unsafe.Pointer
	if *(*unsafe.Pointer)(p) == nil {
		// Allocate the pointee and publish it before decoding into it.
		newptr = unsafe_New(d.typ)
		*(*unsafe.Pointer)(p) = newptr
	} else {
		// Reuse the existing pointee.
		newptr = *(*unsafe.Pointer)(p)
	}
	if err := d.dec.DecodeStream(s, depth, newptr); err != nil {
		return err
	}
	return nil
}

// Decode decodes the value at buf[cursor:] into the pointer at p and
// returns the cursor position after the value.
func (d *ptrDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	cursor = skipWhiteSpace(buf, cursor)
	if buf[cursor] == 'n' {
		if err := validateNull(buf, cursor); err != nil {
			return 0, err
		}
		// null clears the pointer, but only when a destination exists.
		if p != nil {
			*(*unsafe.Pointer)(p) = nil
		}
		cursor += 4 // length of "null"
		return cursor, nil
	}
	var newptr unsafe.Pointer
	if *(*unsafe.Pointer)(p) == nil {
		newptr = unsafe_New(d.typ)
		*(*unsafe.Pointer)(p) = newptr
	} else {
		newptr = *(*unsafe.Pointer)(p)
	}
	c, err := d.dec.Decode(ctx, cursor, depth, newptr)
	if err != nil {
		// On failure, do not leave a half-decoded pointee visible.
		*(*unsafe.Pointer)(p) = nil
		return 0, err
	}
	cursor = c
	return cursor, nil
}

// DecodePath is not supported for pointer decoders.
func (d *ptrDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	return nil, 0, fmt.Errorf("json: ptr decoder does not support decode path")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/bool.go
vendor/github.com/goccy/go-json/internal/decoder/bool.go
package decoder

import (
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
)

// boolDecoder decodes the JSON literals true/false/null into a Go bool.
type boolDecoder struct {
	structName string // carried for error context only
	fieldName  string // carried for error context only
}

// newBoolDecoder builds a boolDecoder with error-context names.
func newBoolDecoder(structName, fieldName string) *boolDecoder {
	return &boolDecoder{structName: structName, fieldName: fieldName}
}

// DecodeStream decodes true/false/null from the stream into the bool at p.
// null leaves the destination untouched. Any other leading character falls
// out of the loop into the unexpected-end error below.
func (d *boolDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	c := s.skipWhiteSpace()
	for {
		switch c {
		case 't':
			if err := trueBytes(s); err != nil {
				return err
			}
			**(**bool)(unsafe.Pointer(&p)) = true
			return nil
		case 'f':
			if err := falseBytes(s); err != nil {
				return err
			}
			**(**bool)(unsafe.Pointer(&p)) = false
			return nil
		case 'n':
			if err := nullBytes(s); err != nil {
				return err
			}
			return nil
		case nul:
			// nul marks buffer exhaustion: refill and retry, or give up
			// if the underlying reader has no more data.
			if s.read() {
				c = s.char()
				continue
			}
			goto ERROR
		}
		// Unrecognized leading character: fall through to the error.
		break
	}
ERROR:
	return errors.ErrUnexpectedEndOfJSON("bool", s.totalOffset())
}

// Decode decodes true/false/null at buf[cursor:] into the bool at p and
// returns the cursor position after the literal. null leaves the
// destination untouched.
func (d *boolDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	cursor = skipWhiteSpace(buf, cursor)
	switch buf[cursor] {
	case 't':
		if err := validateTrue(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4 // length of "true"
		**(**bool)(unsafe.Pointer(&p)) = true
		return cursor, nil
	case 'f':
		if err := validateFalse(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 5 // length of "false"
		**(**bool)(unsafe.Pointer(&p)) = false
		return cursor, nil
	case 'n':
		if err := validateNull(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4 // length of "null"
		return cursor, nil
	}
	return 0, errors.ErrUnexpectedEndOfJSON("bool", cursor)
}

// DecodePath is not supported for bool decoders.
func (d *boolDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	return nil, 0, fmt.Errorf("json: bool decoder does not support decode path")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
vendor/github.com/goccy/go-json/internal/decoder/compile_race.go
//go:build race
// +build race

package decoder

import (
	"sync"
	"unsafe"

	"github.com/goccy/go-json/internal/runtime"
)

// decMu guards cachedDecoder under the race detector; the non-race build
// uses unsynchronized access instead.
var decMu sync.RWMutex

// CompileToGetDecoder returns a cached decoder for typ, compiling and
// caching one on a miss. Addresses outside the known type-address range
// go through the map-backed slow path.
func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) {
	initDecoder()
	typeptr := uintptr(unsafe.Pointer(typ))
	if typeptr > typeAddr.MaxTypeAddr {
		return compileToGetDecoderSlowPath(typeptr, typ)
	}
	// Type descriptors are densely packed; index the cache slice directly.
	index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift
	// Fast path: read-locked cache lookup.
	decMu.RLock()
	if dec := cachedDecoder[index]; dec != nil {
		decMu.RUnlock()
		return dec, nil
	}
	decMu.RUnlock()
	// Compile outside any lock; concurrent callers may compile the same
	// type and the last writer wins, which is harmless.
	dec, err := compileHead(typ, map[uintptr]Decoder{})
	if err != nil {
		return nil, err
	}
	decMu.Lock()
	cachedDecoder[index] = dec
	decMu.Unlock()
	return dec, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/invalid.go
vendor/github.com/goccy/go-json/internal/decoder/invalid.go
package decoder import ( "reflect" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type invalidDecoder struct { typ *runtime.Type kind reflect.Kind structName string fieldName string } func newInvalidDecoder(typ *runtime.Type, structName, fieldName string) *invalidDecoder { return &invalidDecoder{ typ: typ, kind: typ.Kind(), structName: structName, fieldName: fieldName, } } func (d *invalidDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { return &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), Struct: d.structName, Field: d.fieldName, } } func (d *invalidDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { return 0, &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: cursor, Struct: d.structName, Field: d.fieldName, } } func (d *invalidDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: cursor, Struct: d.structName, Field: d.fieldName, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go
vendor/github.com/goccy/go-json/internal/decoder/anonymous_field.go
package decoder import ( "unsafe" "github.com/goccy/go-json/internal/runtime" ) type anonymousFieldDecoder struct { structType *runtime.Type offset uintptr dec Decoder } func newAnonymousFieldDecoder(structType *runtime.Type, offset uintptr, dec Decoder) *anonymousFieldDecoder { return &anonymousFieldDecoder{ structType: structType, offset: offset, dec: dec, } } func (d *anonymousFieldDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe_New(d.structType) } p = *(*unsafe.Pointer)(p) return d.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+d.offset)) } func (d *anonymousFieldDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { if *(*unsafe.Pointer)(p) == nil { *(*unsafe.Pointer)(p) = unsafe_New(d.structType) } p = *(*unsafe.Pointer)(p) return d.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+d.offset)) } func (d *anonymousFieldDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return d.dec.DecodePath(ctx, cursor, depth) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/stream.go
vendor/github.com/goccy/go-json/internal/decoder/stream.go
package decoder import ( "bytes" "encoding/json" "io" "strconv" "unsafe" "github.com/goccy/go-json/internal/errors" ) const ( initBufSize = 512 ) type Stream struct { buf []byte bufSize int64 length int64 r io.Reader offset int64 cursor int64 filledBuffer bool allRead bool UseNumber bool DisallowUnknownFields bool Option *Option } func NewStream(r io.Reader) *Stream { return &Stream{ r: r, bufSize: initBufSize, buf: make([]byte, initBufSize), Option: &Option{}, } } func (s *Stream) TotalOffset() int64 { return s.totalOffset() } func (s *Stream) Buffered() io.Reader { buflen := int64(len(s.buf)) for i := s.cursor; i < buflen; i++ { if s.buf[i] == nul { return bytes.NewReader(s.buf[s.cursor:i]) } } return bytes.NewReader(s.buf[s.cursor:]) } func (s *Stream) PrepareForDecode() error { for { switch s.char() { case ' ', '\t', '\r', '\n': s.cursor++ continue case ',', ':': s.cursor++ return nil case nul: if s.read() { continue } return io.EOF } break } return nil } func (s *Stream) totalOffset() int64 { return s.offset + s.cursor } func (s *Stream) char() byte { return s.buf[s.cursor] } func (s *Stream) equalChar(c byte) bool { cur := s.buf[s.cursor] if cur == nul { s.read() cur = s.buf[s.cursor] } return cur == c } func (s *Stream) stat() ([]byte, int64, unsafe.Pointer) { return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data } func (s *Stream) bufptr() unsafe.Pointer { return (*sliceHeader)(unsafe.Pointer(&s.buf)).data } func (s *Stream) statForRetry() ([]byte, int64, unsafe.Pointer) { s.cursor-- // for retry ( because caller progress cursor position in each loop ) return s.buf, s.cursor, (*sliceHeader)(unsafe.Pointer(&s.buf)).data } func (s *Stream) Reset() { s.reset() s.bufSize = int64(len(s.buf)) } func (s *Stream) More() bool { for { switch s.char() { case ' ', '\n', '\r', '\t': s.cursor++ continue case '}', ']': return false case nul: if s.read() { continue } return false } break } return true } func (s *Stream) Token() (interface{}, error) { for { c 
:= s.char() switch c { case ' ', '\n', '\r', '\t': s.cursor++ case '{', '[', ']', '}': s.cursor++ return json.Delim(c), nil case ',', ':': s.cursor++ case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': bytes := floatBytes(s) str := *(*string)(unsafe.Pointer(&bytes)) if s.UseNumber { return json.Number(str), nil } f64, err := strconv.ParseFloat(str, 64) if err != nil { return nil, err } return f64, nil case '"': bytes, err := stringBytes(s) if err != nil { return nil, err } return string(bytes), nil case 't': if err := trueBytes(s); err != nil { return nil, err } return true, nil case 'f': if err := falseBytes(s); err != nil { return nil, err } return false, nil case 'n': if err := nullBytes(s); err != nil { return nil, err } return nil, nil case nul: if s.read() { continue } goto END default: return nil, errors.ErrInvalidCharacter(s.char(), "token", s.totalOffset()) } } END: return nil, io.EOF } func (s *Stream) reset() { s.offset += s.cursor s.buf = s.buf[s.cursor:] s.length -= s.cursor s.cursor = 0 } func (s *Stream) readBuf() []byte { if s.filledBuffer { s.bufSize *= 2 remainBuf := s.buf s.buf = make([]byte, s.bufSize) copy(s.buf, remainBuf) } remainLen := s.length - s.cursor remainNotNulCharNum := int64(0) for i := int64(0); i < remainLen; i++ { if s.buf[s.cursor+i] == nul { break } remainNotNulCharNum++ } s.length = s.cursor + remainNotNulCharNum return s.buf[s.cursor+remainNotNulCharNum:] } func (s *Stream) read() bool { if s.allRead { return false } buf := s.readBuf() last := len(buf) - 1 buf[last] = nul n, err := s.r.Read(buf[:last]) s.length += int64(n) if n == last { s.filledBuffer = true } else { s.filledBuffer = false } if err == io.EOF { s.allRead = true } else if err != nil { return false } return true } func (s *Stream) skipWhiteSpace() byte { p := s.bufptr() LOOP: c := char(p, s.cursor) switch c { case ' ', '\n', '\t', '\r': s.cursor++ goto LOOP case nul: if s.read() { p = s.bufptr() goto LOOP } } return c } func (s *Stream) skipObject(depth 
int64) error { braceCount := 1 _, cursor, p := s.stat() for { switch char(p, cursor) { case '{': braceCount++ depth++ if depth > maxDecodeNestingDepth { return errors.ErrExceededMaxDepth(s.char(), s.cursor) } case '}': braceCount-- depth-- if braceCount == 0 { s.cursor = cursor + 1 return nil } case '[': depth++ if depth > maxDecodeNestingDepth { return errors.ErrExceededMaxDepth(s.char(), s.cursor) } case ']': depth-- case '"': for { cursor++ switch char(p, cursor) { case '\\': cursor++ if char(p, cursor) == nul { s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return errors.ErrUnexpectedEndOfJSON("string of object", cursor) } case '"': goto SWITCH_OUT case nul: s.cursor = cursor if s.read() { _, cursor, p = s.statForRetry() continue } return errors.ErrUnexpectedEndOfJSON("string of object", cursor) } } case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return errors.ErrUnexpectedEndOfJSON("object of object", cursor) } SWITCH_OUT: cursor++ } } func (s *Stream) skipArray(depth int64) error { bracketCount := 1 _, cursor, p := s.stat() for { switch char(p, cursor) { case '[': bracketCount++ depth++ if depth > maxDecodeNestingDepth { return errors.ErrExceededMaxDepth(s.char(), s.cursor) } case ']': bracketCount-- depth-- if bracketCount == 0 { s.cursor = cursor + 1 return nil } case '{': depth++ if depth > maxDecodeNestingDepth { return errors.ErrExceededMaxDepth(s.char(), s.cursor) } case '}': depth-- case '"': for { cursor++ switch char(p, cursor) { case '\\': cursor++ if char(p, cursor) == nul { s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return errors.ErrUnexpectedEndOfJSON("string of object", cursor) } case '"': goto SWITCH_OUT case nul: s.cursor = cursor if s.read() { _, cursor, p = s.statForRetry() continue } return errors.ErrUnexpectedEndOfJSON("string of object", cursor) } } case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return 
errors.ErrUnexpectedEndOfJSON("array of object", cursor) } SWITCH_OUT: cursor++ } } func (s *Stream) skipValue(depth int64) error { _, cursor, p := s.stat() for { switch char(p, cursor) { case ' ', '\n', '\t', '\r': cursor++ continue case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return errors.ErrUnexpectedEndOfJSON("value of object", s.totalOffset()) case '{': s.cursor = cursor + 1 return s.skipObject(depth + 1) case '[': s.cursor = cursor + 1 return s.skipArray(depth + 1) case '"': for { cursor++ switch char(p, cursor) { case '\\': cursor++ if char(p, cursor) == nul { s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) } case '"': s.cursor = cursor + 1 return nil case nul: s.cursor = cursor if s.read() { _, cursor, p = s.statForRetry() continue } return errors.ErrUnexpectedEndOfJSON("value of string", s.totalOffset()) } } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': for { cursor++ c := char(p, cursor) if floatTable[c] { continue } else if c == nul { if s.read() { _, cursor, p = s.stat() continue } } s.cursor = cursor return nil } case 't': s.cursor = cursor if err := trueBytes(s); err != nil { return err } return nil case 'f': s.cursor = cursor if err := falseBytes(s); err != nil { return err } return nil case 'n': s.cursor = cursor if err := nullBytes(s); err != nil { return err } return nil } cursor++ } } func nullBytes(s *Stream) error { // current cursor's character is 'n' s.cursor++ if s.char() != 'u' { if err := retryReadNull(s); err != nil { return err } } s.cursor++ if s.char() != 'l' { if err := retryReadNull(s); err != nil { return err } } s.cursor++ if s.char() != 'l' { if err := retryReadNull(s); err != nil { return err } } s.cursor++ return nil } func retryReadNull(s *Stream) error { if s.char() == nul && s.read() { return nil } return errors.ErrInvalidCharacter(s.char(), "null", s.totalOffset()) } func trueBytes(s 
*Stream) error { // current cursor's character is 't' s.cursor++ if s.char() != 'r' { if err := retryReadTrue(s); err != nil { return err } } s.cursor++ if s.char() != 'u' { if err := retryReadTrue(s); err != nil { return err } } s.cursor++ if s.char() != 'e' { if err := retryReadTrue(s); err != nil { return err } } s.cursor++ return nil } func retryReadTrue(s *Stream) error { if s.char() == nul && s.read() { return nil } return errors.ErrInvalidCharacter(s.char(), "bool(true)", s.totalOffset()) } func falseBytes(s *Stream) error { // current cursor's character is 'f' s.cursor++ if s.char() != 'a' { if err := retryReadFalse(s); err != nil { return err } } s.cursor++ if s.char() != 'l' { if err := retryReadFalse(s); err != nil { return err } } s.cursor++ if s.char() != 's' { if err := retryReadFalse(s); err != nil { return err } } s.cursor++ if s.char() != 'e' { if err := retryReadFalse(s); err != nil { return err } } s.cursor++ return nil } func retryReadFalse(s *Stream) error { if s.char() == nul && s.read() { return nil } return errors.ErrInvalidCharacter(s.char(), "bool(false)", s.totalOffset()) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/path.go
vendor/github.com/goccy/go-json/internal/decoder/path.go
package decoder import ( "fmt" "reflect" "strconv" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type PathString string func (s PathString) Build() (*Path, error) { builder := new(PathBuilder) return builder.Build([]rune(s)) } type PathBuilder struct { root PathNode node PathNode singleQuotePathSelector bool doubleQuotePathSelector bool } func (b *PathBuilder) Build(buf []rune) (*Path, error) { node, err := b.build(buf) if err != nil { return nil, err } return &Path{ node: node, RootSelectorOnly: node == nil, SingleQuotePathSelector: b.singleQuotePathSelector, DoubleQuotePathSelector: b.doubleQuotePathSelector, }, nil } func (b *PathBuilder) build(buf []rune) (PathNode, error) { if len(buf) == 0 { return nil, errors.ErrEmptyPath() } if buf[0] != '$' { return nil, errors.ErrInvalidPath("JSON Path must start with a $ character") } if len(buf) == 1 { return nil, nil } buf = buf[1:] offset, err := b.buildNext(buf) if err != nil { return nil, err } if len(buf) > offset { return nil, errors.ErrInvalidPath("remain invalid path %q", buf[offset:]) } return b.root, nil } func (b *PathBuilder) buildNextCharIfExists(buf []rune, cursor int) (int, error) { if len(buf) > cursor { offset, err := b.buildNext(buf[cursor:]) if err != nil { return 0, err } return cursor + 1 + offset, nil } return cursor, nil } func (b *PathBuilder) buildNext(buf []rune) (int, error) { switch buf[0] { case '.': if len(buf) == 1 { return 0, errors.ErrInvalidPath("JSON Path ends with dot character") } offset, err := b.buildSelector(buf[1:]) if err != nil { return 0, err } return offset + 1, nil case '[': if len(buf) == 1 { return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") } offset, err := b.buildIndex(buf[1:]) if err != nil { return 0, err } return offset + 1, nil default: return 0, errors.ErrInvalidPath("expect dot or left bracket character. 
but found %c character", buf[0]) } } func (b *PathBuilder) buildSelector(buf []rune) (int, error) { switch buf[0] { case '.': if len(buf) == 1 { return 0, errors.ErrInvalidPath("JSON Path ends with double dot character") } offset, err := b.buildPathRecursive(buf[1:]) if err != nil { return 0, err } return 1 + offset, nil case '[', ']', '$', '*': return 0, errors.ErrInvalidPath("found invalid path character %c after dot", buf[0]) } for cursor := 0; cursor < len(buf); cursor++ { switch buf[cursor] { case '$', '*', ']': return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) case '.': if cursor+1 >= len(buf) { return 0, errors.ErrInvalidPath("JSON Path ends with dot character") } selector := buf[:cursor] b.addSelectorNode(string(selector)) offset, err := b.buildSelector(buf[cursor+1:]) if err != nil { return 0, err } return cursor + 1 + offset, nil case '[': if cursor+1 >= len(buf) { return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") } selector := buf[:cursor] b.addSelectorNode(string(selector)) offset, err := b.buildIndex(buf[cursor+1:]) if err != nil { return 0, err } return cursor + 1 + offset, nil case '"': if cursor+1 >= len(buf) { return 0, errors.ErrInvalidPath("JSON Path ends with double quote character") } offset, err := b.buildQuoteSelector(buf[cursor+1:], DoubleQuotePathSelector) if err != nil { return 0, err } return cursor + 1 + offset, nil } } b.addSelectorNode(string(buf)) return len(buf), nil } func (b *PathBuilder) buildQuoteSelector(buf []rune, sel QuotePathSelector) (int, error) { switch buf[0] { case '[', ']', '$', '.', '*', '\'', '"': return 0, errors.ErrInvalidPath("found invalid path character %c after quote", buf[0]) } for cursor := 0; cursor < len(buf); cursor++ { switch buf[cursor] { case '\'': if sel != SingleQuotePathSelector { return 0, errors.ErrInvalidPath("found double quote character in field selector with single quote context") } if len(buf) <= cursor+1 { return 0, 
errors.ErrInvalidPath("JSON Path ends with single quote character in field selector context") } if buf[cursor+1] != ']' { return 0, errors.ErrInvalidPath("expect right bracket for field selector with single quote but found %c", buf[cursor+1]) } selector := buf[:cursor] b.addSelectorNode(string(selector)) b.singleQuotePathSelector = true return b.buildNextCharIfExists(buf, cursor+2) case '"': if sel != DoubleQuotePathSelector { return 0, errors.ErrInvalidPath("found single quote character in field selector with double quote context") } selector := buf[:cursor] b.addSelectorNode(string(selector)) b.doubleQuotePathSelector = true return b.buildNextCharIfExists(buf, cursor+1) } } return 0, errors.ErrInvalidPath("couldn't find quote character in selector quote path context") } func (b *PathBuilder) buildPathRecursive(buf []rune) (int, error) { switch buf[0] { case '.', '[', ']', '$', '*': return 0, errors.ErrInvalidPath("found invalid path character %c after double dot", buf[0]) } for cursor := 0; cursor < len(buf); cursor++ { switch buf[cursor] { case '$', '*', ']': return 0, errors.ErrInvalidPath("found %c character in field selector context", buf[cursor]) case '.': if cursor+1 >= len(buf) { return 0, errors.ErrInvalidPath("JSON Path ends with dot character") } selector := buf[:cursor] b.addRecursiveNode(string(selector)) offset, err := b.buildSelector(buf[cursor+1:]) if err != nil { return 0, err } return cursor + 1 + offset, nil case '[': if cursor+1 >= len(buf) { return 0, errors.ErrInvalidPath("JSON Path ends with left bracket character") } selector := buf[:cursor] b.addRecursiveNode(string(selector)) offset, err := b.buildIndex(buf[cursor+1:]) if err != nil { return 0, err } return cursor + 1 + offset, nil } } b.addRecursiveNode(string(buf)) return len(buf), nil } func (b *PathBuilder) buildIndex(buf []rune) (int, error) { switch buf[0] { case '.', '[', ']', '$': return 0, errors.ErrInvalidPath("found invalid path character %c after left bracket", buf[0]) case 
'\'': if len(buf) == 1 { return 0, errors.ErrInvalidPath("JSON Path ends with single quote character") } offset, err := b.buildQuoteSelector(buf[1:], SingleQuotePathSelector) if err != nil { return 0, err } return 1 + offset, nil case '*': if len(buf) == 1 { return 0, errors.ErrInvalidPath("JSON Path ends with star character") } if buf[1] != ']' { return 0, errors.ErrInvalidPath("expect right bracket character for index all path but found %c character", buf[1]) } b.addIndexAllNode() offset := len("*]") if len(buf) > 2 { buildOffset, err := b.buildNext(buf[2:]) if err != nil { return 0, err } return offset + buildOffset, nil } return offset, nil } for cursor := 0; cursor < len(buf); cursor++ { switch buf[cursor] { case ']': index, err := strconv.ParseInt(string(buf[:cursor]), 10, 64) if err != nil { return 0, errors.ErrInvalidPath("%q is unexpected index path", buf[:cursor]) } b.addIndexNode(int(index)) return b.buildNextCharIfExists(buf, cursor+1) } } return 0, errors.ErrInvalidPath("couldn't find right bracket character in index path context") } func (b *PathBuilder) addIndexAllNode() { node := newPathIndexAllNode() if b.root == nil { b.root = node b.node = node } else { b.node = b.node.chain(node) } } func (b *PathBuilder) addRecursiveNode(selector string) { node := newPathRecursiveNode(selector) if b.root == nil { b.root = node b.node = node } else { b.node = b.node.chain(node) } } func (b *PathBuilder) addSelectorNode(name string) { node := newPathSelectorNode(name) if b.root == nil { b.root = node b.node = node } else { b.node = b.node.chain(node) } } func (b *PathBuilder) addIndexNode(idx int) { node := newPathIndexNode(idx) if b.root == nil { b.root = node b.node = node } else { b.node = b.node.chain(node) } } type QuotePathSelector int const ( SingleQuotePathSelector QuotePathSelector = 1 DoubleQuotePathSelector QuotePathSelector = 2 ) type Path struct { node PathNode RootSelectorOnly bool SingleQuotePathSelector bool DoubleQuotePathSelector bool } func (p 
*Path) Field(sel string) (PathNode, bool, error) { if p.node == nil { return nil, false, nil } return p.node.Field(sel) } func (p *Path) Get(src, dst reflect.Value) error { if p.node == nil { return nil } return p.node.Get(src, dst) } func (p *Path) String() string { if p.node == nil { return "$" } return p.node.String() } type PathNode interface { fmt.Stringer Index(idx int) (PathNode, bool, error) Field(fieldName string) (PathNode, bool, error) Get(src, dst reflect.Value) error chain(PathNode) PathNode target() bool single() bool } type BasePathNode struct { child PathNode } func (n *BasePathNode) chain(node PathNode) PathNode { n.child = node return node } func (n *BasePathNode) target() bool { return n.child == nil } func (n *BasePathNode) single() bool { return true } type PathSelectorNode struct { *BasePathNode selector string } func newPathSelectorNode(selector string) *PathSelectorNode { return &PathSelectorNode{ BasePathNode: &BasePathNode{}, selector: selector, } } func (n *PathSelectorNode) Index(idx int) (PathNode, bool, error) { return nil, false, &errors.PathError{} } func (n *PathSelectorNode) Field(fieldName string) (PathNode, bool, error) { if n.selector == fieldName { return n.child, true, nil } return nil, false, nil } func (n *PathSelectorNode) Get(src, dst reflect.Value) error { switch src.Type().Kind() { case reflect.Map: iter := src.MapRange() for iter.Next() { key, ok := iter.Key().Interface().(string) if !ok { return fmt.Errorf("invalid map key type %T", src.Type().Key()) } child, found, err := n.Field(key) if err != nil { return err } if found { if child != nil { return child.Get(iter.Value(), dst) } return AssignValue(iter.Value(), dst) } } case reflect.Struct: typ := src.Type() for i := 0; i < typ.Len(); i++ { tag := runtime.StructTagFromField(typ.Field(i)) child, found, err := n.Field(tag.Key) if err != nil { return err } if found { if child != nil { return child.Get(src.Field(i), dst) } return AssignValue(src.Field(i), dst) } } case 
reflect.Ptr: return n.Get(src.Elem(), dst) case reflect.Interface: return n.Get(reflect.ValueOf(src.Interface()), dst) case reflect.Float64, reflect.String, reflect.Bool: return AssignValue(src, dst) } return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) } func (n *PathSelectorNode) String() string { s := fmt.Sprintf(".%s", n.selector) if n.child != nil { s += n.child.String() } return s } type PathIndexNode struct { *BasePathNode selector int } func newPathIndexNode(selector int) *PathIndexNode { return &PathIndexNode{ BasePathNode: &BasePathNode{}, selector: selector, } } func (n *PathIndexNode) Index(idx int) (PathNode, bool, error) { if n.selector == idx { return n.child, true, nil } return nil, false, nil } func (n *PathIndexNode) Field(fieldName string) (PathNode, bool, error) { return nil, false, &errors.PathError{} } func (n *PathIndexNode) Get(src, dst reflect.Value) error { switch src.Type().Kind() { case reflect.Array, reflect.Slice: if src.Len() > n.selector { if n.child != nil { return n.child.Get(src.Index(n.selector), dst) } return AssignValue(src.Index(n.selector), dst) } case reflect.Ptr: return n.Get(src.Elem(), dst) case reflect.Interface: return n.Get(reflect.ValueOf(src.Interface()), dst) } return fmt.Errorf("failed to get [%d] value from %s", n.selector, src.Type()) } func (n *PathIndexNode) String() string { s := fmt.Sprintf("[%d]", n.selector) if n.child != nil { s += n.child.String() } return s } type PathIndexAllNode struct { *BasePathNode } func newPathIndexAllNode() *PathIndexAllNode { return &PathIndexAllNode{ BasePathNode: &BasePathNode{}, } } func (n *PathIndexAllNode) Index(idx int) (PathNode, bool, error) { return n.child, true, nil } func (n *PathIndexAllNode) Field(fieldName string) (PathNode, bool, error) { return nil, false, &errors.PathError{} } func (n *PathIndexAllNode) Get(src, dst reflect.Value) error { switch src.Type().Kind() { case reflect.Array, reflect.Slice: var arr []interface{} for i := 0; i < 
src.Len(); i++ { var v interface{} rv := reflect.ValueOf(&v) if n.child != nil { if err := n.child.Get(src.Index(i), rv); err != nil { return err } } else { if err := AssignValue(src.Index(i), rv); err != nil { return err } } arr = append(arr, v) } if err := AssignValue(reflect.ValueOf(arr), dst); err != nil { return err } return nil case reflect.Ptr: return n.Get(src.Elem(), dst) case reflect.Interface: return n.Get(reflect.ValueOf(src.Interface()), dst) } return fmt.Errorf("failed to get all value from %s", src.Type()) } func (n *PathIndexAllNode) String() string { s := "[*]" if n.child != nil { s += n.child.String() } return s } type PathRecursiveNode struct { *BasePathNode selector string } func newPathRecursiveNode(selector string) *PathRecursiveNode { node := newPathSelectorNode(selector) return &PathRecursiveNode{ BasePathNode: &BasePathNode{ child: node, }, selector: selector, } } func (n *PathRecursiveNode) Field(fieldName string) (PathNode, bool, error) { if n.selector == fieldName { return n.child, true, nil } return nil, false, nil } func (n *PathRecursiveNode) Index(_ int) (PathNode, bool, error) { return n, true, nil } func valueToSliceValue(v interface{}) []interface{} { rv := reflect.ValueOf(v) ret := []interface{}{} if rv.Type().Kind() == reflect.Slice || rv.Type().Kind() == reflect.Array { for i := 0; i < rv.Len(); i++ { ret = append(ret, rv.Index(i).Interface()) } return ret } return []interface{}{v} } func (n *PathRecursiveNode) Get(src, dst reflect.Value) error { if n.child == nil { return fmt.Errorf("failed to get by recursive path ..%s", n.selector) } var arr []interface{} switch src.Type().Kind() { case reflect.Map: iter := src.MapRange() for iter.Next() { key, ok := iter.Key().Interface().(string) if !ok { return fmt.Errorf("invalid map key type %T", src.Type().Key()) } child, found, err := n.Field(key) if err != nil { return err } if found { var v interface{} rv := reflect.ValueOf(&v) _ = child.Get(iter.Value(), rv) arr = append(arr, 
valueToSliceValue(v)...) } else { var v interface{} rv := reflect.ValueOf(&v) _ = n.Get(iter.Value(), rv) if v != nil { arr = append(arr, valueToSliceValue(v)...) } } } _ = AssignValue(reflect.ValueOf(arr), dst) return nil case reflect.Struct: typ := src.Type() for i := 0; i < typ.Len(); i++ { tag := runtime.StructTagFromField(typ.Field(i)) child, found, err := n.Field(tag.Key) if err != nil { return err } if found { var v interface{} rv := reflect.ValueOf(&v) _ = child.Get(src.Field(i), rv) arr = append(arr, valueToSliceValue(v)...) } else { var v interface{} rv := reflect.ValueOf(&v) _ = n.Get(src.Field(i), rv) if v != nil { arr = append(arr, valueToSliceValue(v)...) } } } _ = AssignValue(reflect.ValueOf(arr), dst) return nil case reflect.Array, reflect.Slice: for i := 0; i < src.Len(); i++ { var v interface{} rv := reflect.ValueOf(&v) _ = n.Get(src.Index(i), rv) if v != nil { arr = append(arr, valueToSliceValue(v)...) } } _ = AssignValue(reflect.ValueOf(arr), dst) return nil case reflect.Ptr: return n.Get(src.Elem(), dst) case reflect.Interface: return n.Get(reflect.ValueOf(src.Interface()), dst) } return fmt.Errorf("failed to get %s value from %s", n.selector, src.Type()) } func (n *PathRecursiveNode) String() string { s := fmt.Sprintf("..%s", n.selector) if n.child != nil { s += n.child.String() } return s }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/option.go
vendor/github.com/goccy/go-json/internal/decoder/option.go
package decoder import "context" type OptionFlags uint8 const ( FirstWinOption OptionFlags = 1 << iota ContextOption PathOption ) type Option struct { Flags OptionFlags Context context.Context Path *Path }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/struct.go
vendor/github.com/goccy/go-json/internal/decoder/struct.go
package decoder import ( "fmt" "math" "math/bits" "sort" "strings" "unicode" "unicode/utf16" "unsafe" "github.com/goccy/go-json/internal/errors" ) type structFieldSet struct { dec Decoder offset uintptr isTaggedKey bool fieldIdx int key string keyLen int64 err error } type structDecoder struct { fieldMap map[string]*structFieldSet fieldUniqueNameNum int stringDecoder *stringDecoder structName string fieldName string isTriedOptimize bool keyBitmapUint8 [][256]uint8 keyBitmapUint16 [][256]uint16 sortedFieldSets []*structFieldSet keyDecoder func(*structDecoder, []byte, int64) (int64, *structFieldSet, error) keyStreamDecoder func(*structDecoder, *Stream) (*structFieldSet, string, error) } var ( largeToSmallTable [256]byte ) func init() { for i := 0; i < 256; i++ { c := i if 'A' <= c && c <= 'Z' { c += 'a' - 'A' } largeToSmallTable[i] = byte(c) } } func toASCIILower(s string) string { b := []byte(s) for i := range b { b[i] = largeToSmallTable[b[i]] } return string(b) } func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { return &structDecoder{ fieldMap: fieldMap, stringDecoder: newStringDecoder(structName, fieldName), structName: structName, fieldName: fieldName, keyDecoder: decodeKey, keyStreamDecoder: decodeKeyStream, } } const ( allowOptimizeMaxKeyLen = 64 allowOptimizeMaxFieldLen = 16 ) func (d *structDecoder) tryOptimize() { fieldUniqueNameMap := map[string]int{} fieldIdx := -1 for k, v := range d.fieldMap { lower := strings.ToLower(k) idx, exists := fieldUniqueNameMap[lower] if exists { v.fieldIdx = idx } else { fieldIdx++ v.fieldIdx = fieldIdx } fieldUniqueNameMap[lower] = fieldIdx } d.fieldUniqueNameNum = len(fieldUniqueNameMap) if d.isTriedOptimize { return } fieldMap := map[string]*structFieldSet{} conflicted := map[string]struct{}{} for k, v := range d.fieldMap { key := strings.ToLower(k) if key != k { if key != toASCIILower(k) { d.isTriedOptimize = true return } // already exists same key (e.g. 
Hello and HELLO has same lower case key if _, exists := conflicted[key]; exists { d.isTriedOptimize = true return } conflicted[key] = struct{}{} } if field, exists := fieldMap[key]; exists { if field != v { d.isTriedOptimize = true return } } fieldMap[key] = v } if len(fieldMap) > allowOptimizeMaxFieldLen { d.isTriedOptimize = true return } var maxKeyLen int sortedKeys := []string{} for key := range fieldMap { keyLen := len(key) if keyLen > allowOptimizeMaxKeyLen { d.isTriedOptimize = true return } if maxKeyLen < keyLen { maxKeyLen = keyLen } sortedKeys = append(sortedKeys, key) } sort.Strings(sortedKeys) // By allocating one extra capacity than `maxKeyLen`, // it is possible to avoid the process of comparing the index of the key with the length of the bitmap each time. bitmapLen := maxKeyLen + 1 if len(sortedKeys) <= 8 { keyBitmap := make([][256]uint8, bitmapLen) for i, key := range sortedKeys { for j := 0; j < len(key); j++ { c := key[j] keyBitmap[j][c] |= (1 << uint(i)) } d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) } d.keyBitmapUint8 = keyBitmap d.keyDecoder = decodeKeyByBitmapUint8 d.keyStreamDecoder = decodeKeyByBitmapUint8Stream } else { keyBitmap := make([][256]uint16, bitmapLen) for i, key := range sortedKeys { for j := 0; j < len(key); j++ { c := key[j] keyBitmap[j][c] |= (1 << uint(i)) } d.sortedFieldSets = append(d.sortedFieldSets, fieldMap[key]) } d.keyBitmapUint16 = keyBitmap d.keyDecoder = decodeKeyByBitmapUint16 d.keyStreamDecoder = decodeKeyByBitmapUint16Stream } } // decode from '\uXXXX' func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) { const defaultOffset = 4 const surrogateOffset = 6 if cursor+defaultOffset >= int64(len(buf)) { return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) } r := unicodeToRune(buf[cursor : cursor+defaultOffset]) if utf16.IsSurrogate(r) { cursor += defaultOffset if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { 
return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil } cursor += 2 r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { return []byte(string(r)), cursor + defaultOffset - 1, nil } } return []byte(string(r)), cursor + defaultOffset - 1, nil } func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) { c := buf[cursor] cursor++ switch c { case '"': return []byte{'"'}, cursor, nil case '\\': return []byte{'\\'}, cursor, nil case '/': return []byte{'/'}, cursor, nil case 'b': return []byte{'\b'}, cursor, nil case 'f': return []byte{'\f'}, cursor, nil case 'n': return []byte{'\n'}, cursor, nil case 'r': return []byte{'\r'}, cursor, nil case 't': return []byte{'\t'}, cursor, nil case 'u': return decodeKeyCharByUnicodeRune(buf, cursor) } return nil, cursor, nil } func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { var ( curBit uint8 = math.MaxUint8 ) b := (*sliceHeader)(unsafe.Pointer(&buf)).data for { switch char(b, cursor) { case ' ', '\n', '\t', '\r': cursor++ case '"': cursor++ c := char(b, cursor) switch c { case '"': cursor++ return cursor, nil, nil case nul: return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) } keyIdx := 0 bitmap := d.keyBitmapUint8 start := cursor for { c := char(b, cursor) switch c { case '"': fieldSetIndex := bits.TrailingZeros8(curBit) field := d.sortedFieldSets[fieldSetIndex] keyLen := cursor - start cursor++ if keyLen < field.keyLen { // early match return cursor, nil, nil } return cursor, field, nil case nul: return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) case '\\': cursor++ chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) if err != nil { return 0, nil, err } for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { return decodeKeyNotFound(b, cursor) } keyIdx++ } cursor = nextCursor default: curBit 
&= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { return decodeKeyNotFound(b, cursor) } keyIdx++ } cursor++ } default: return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) } } } func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { var ( curBit uint16 = math.MaxUint16 ) b := (*sliceHeader)(unsafe.Pointer(&buf)).data for { switch char(b, cursor) { case ' ', '\n', '\t', '\r': cursor++ case '"': cursor++ c := char(b, cursor) switch c { case '"': cursor++ return cursor, nil, nil case nul: return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) } keyIdx := 0 bitmap := d.keyBitmapUint16 start := cursor for { c := char(b, cursor) switch c { case '"': fieldSetIndex := bits.TrailingZeros16(curBit) field := d.sortedFieldSets[fieldSetIndex] keyLen := cursor - start cursor++ if keyLen < field.keyLen { // early match return cursor, nil, nil } return cursor, field, nil case nul: return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) case '\\': cursor++ chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) if err != nil { return 0, nil, err } for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { return decodeKeyNotFound(b, cursor) } keyIdx++ } cursor = nextCursor default: curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { return decodeKeyNotFound(b, cursor) } keyIdx++ } cursor++ } default: return cursor, nil, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) } } } func decodeKeyNotFound(b unsafe.Pointer, cursor int64) (int64, *structFieldSet, error) { for { cursor++ switch char(b, cursor) { case '"': cursor++ return cursor, nil, nil case '\\': cursor++ if char(b, cursor) == nul { return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) } case nul: return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) } } } func decodeKey(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { 
key, c, err := d.stringDecoder.decodeByte(buf, cursor) if err != nil { return 0, nil, err } cursor = c k := *(*string)(unsafe.Pointer(&key)) field, exists := d.fieldMap[k] if !exists { return cursor, nil, nil } return cursor, field, nil } func decodeKeyByBitmapUint8Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { var ( curBit uint8 = math.MaxUint8 ) _, cursor, p := s.stat() for { switch char(p, cursor) { case ' ', '\n', '\t', '\r': cursor++ case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) case '"': cursor++ FIRST_CHAR: start := cursor switch char(p, cursor) { case '"': cursor++ s.cursor = cursor return nil, "", nil case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() goto FIRST_CHAR } return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } keyIdx := 0 bitmap := d.keyBitmapUint8 for { c := char(p, cursor) switch c { case '"': fieldSetIndex := bits.TrailingZeros8(curBit) field := d.sortedFieldSets[fieldSetIndex] keyLen := cursor - start cursor++ s.cursor = cursor if keyLen < field.keyLen { // early match return nil, field.key, nil } return field, field.key, nil case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) case '\\': s.cursor = cursor + 1 // skip '\' char chars, err := decodeKeyCharByEscapeCharStream(s) if err != nil { return nil, "", err } cursor = s.cursor for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { s.cursor = cursor return decodeKeyNotFoundStream(s, start) } keyIdx++ } default: curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { s.cursor = cursor return decodeKeyNotFoundStream(s, start) } keyIdx++ } cursor++ } default: return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) } } } func 
decodeKeyByBitmapUint16Stream(d *structDecoder, s *Stream) (*structFieldSet, string, error) { var ( curBit uint16 = math.MaxUint16 ) _, cursor, p := s.stat() for { switch char(p, cursor) { case ' ', '\n', '\t', '\r': cursor++ case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) case '"': cursor++ FIRST_CHAR: start := cursor switch char(p, cursor) { case '"': cursor++ s.cursor = cursor return nil, "", nil case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() goto FIRST_CHAR } return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } keyIdx := 0 bitmap := d.keyBitmapUint16 for { c := char(p, cursor) switch c { case '"': fieldSetIndex := bits.TrailingZeros16(curBit) field := d.sortedFieldSets[fieldSetIndex] keyLen := cursor - start cursor++ s.cursor = cursor if keyLen < field.keyLen { // early match return nil, field.key, nil } return field, field.key, nil case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) case '\\': s.cursor = cursor + 1 // skip '\' char chars, err := decodeKeyCharByEscapeCharStream(s) if err != nil { return nil, "", err } cursor = s.cursor for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { s.cursor = cursor return decodeKeyNotFoundStream(s, start) } keyIdx++ } default: curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { s.cursor = cursor return decodeKeyNotFoundStream(s, start) } keyIdx++ } cursor++ } default: return nil, "", errors.ErrInvalidBeginningOfValue(char(p, cursor), s.totalOffset()) } } } // decode from '\uXXXX' func decodeKeyCharByUnicodeRuneStream(s *Stream) ([]byte, error) { const defaultOffset = 4 const surrogateOffset = 6 if s.cursor+defaultOffset >= s.length { if !s.read() { return nil, errors.ErrInvalidCharacter(s.char(), "escaped unicode char", 
s.totalOffset()) } } r := unicodeToRune(s.buf[s.cursor : s.cursor+defaultOffset]) if utf16.IsSurrogate(r) { s.cursor += defaultOffset if s.cursor+surrogateOffset >= s.length { s.read() } if s.cursor+surrogateOffset >= s.length || s.buf[s.cursor] != '\\' || s.buf[s.cursor+1] != 'u' { s.cursor += defaultOffset - 1 return []byte(string(unicode.ReplacementChar)), nil } r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { s.cursor += defaultOffset - 1 return []byte(string(r)), nil } } s.cursor += defaultOffset - 1 return []byte(string(r)), nil } func decodeKeyCharByEscapeCharStream(s *Stream) ([]byte, error) { c := s.buf[s.cursor] s.cursor++ RETRY: switch c { case '"': return []byte{'"'}, nil case '\\': return []byte{'\\'}, nil case '/': return []byte{'/'}, nil case 'b': return []byte{'\b'}, nil case 'f': return []byte{'\f'}, nil case 'n': return []byte{'\n'}, nil case 'r': return []byte{'\r'}, nil case 't': return []byte{'\t'}, nil case 'u': return decodeKeyCharByUnicodeRuneStream(s) case nul: if !s.read() { return nil, errors.ErrInvalidCharacter(s.char(), "escaped char", s.totalOffset()) } goto RETRY default: return nil, errors.ErrUnexpectedEndOfJSON("struct field", s.totalOffset()) } } func decodeKeyNotFoundStream(s *Stream, start int64) (*structFieldSet, string, error) { buf, cursor, p := s.stat() for { cursor++ switch char(p, cursor) { case '"': b := buf[start:cursor] key := *(*string)(unsafe.Pointer(&b)) cursor++ s.cursor = cursor return nil, key, nil case '\\': cursor++ if char(p, cursor) == nul { s.cursor = cursor if !s.read() { return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } buf, cursor, p = s.statForRetry() } case nul: s.cursor = cursor if !s.read() { return nil, "", errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } buf, cursor, p = s.statForRetry() } } } func decodeKeyStream(d *structDecoder, s *Stream) (*structFieldSet, string, 
error) { key, err := d.stringDecoder.decodeStreamByte(s) if err != nil { return nil, "", err } k := *(*string)(unsafe.Pointer(&key)) return d.fieldMap[k], k, nil } func (d *structDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { depth++ if depth > maxDecodeNestingDepth { return errors.ErrExceededMaxDepth(s.char(), s.cursor) } c := s.skipWhiteSpace() switch c { case 'n': if err := nullBytes(s); err != nil { return err } return nil default: if s.char() != '{' { return errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) } } s.cursor++ if s.skipWhiteSpace() == '}' { s.cursor++ return nil } var ( seenFields map[int]struct{} seenFieldNum int ) firstWin := (s.Option.Flags & FirstWinOption) != 0 if firstWin { seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) } for { s.reset() field, key, err := d.keyStreamDecoder(d, s) if err != nil { return err } if s.skipWhiteSpace() != ':' { return errors.ErrExpected("colon after object key", s.totalOffset()) } s.cursor++ if field != nil { if field.err != nil { return field.err } if firstWin { if _, exists := seenFields[field.fieldIdx]; exists { if err := s.skipValue(depth); err != nil { return err } } else { if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { return err } seenFieldNum++ if d.fieldUniqueNameNum <= seenFieldNum { return s.skipObject(depth) } seenFields[field.fieldIdx] = struct{}{} } } else { if err := field.dec.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+field.offset)); err != nil { return err } } } else if s.DisallowUnknownFields { return fmt.Errorf("json: unknown field %q", key) } else { if err := s.skipValue(depth); err != nil { return err } } c := s.skipWhiteSpace() if c == '}' { s.cursor++ return nil } if c != ',' { return errors.ErrExpected("comma after object element", s.totalOffset()) } s.cursor++ } } func (d *structDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { buf := ctx.Buf 
depth++ if depth > maxDecodeNestingDepth { return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) } buflen := int64(len(buf)) cursor = skipWhiteSpace(buf, cursor) b := (*sliceHeader)(unsafe.Pointer(&buf)).data switch char(b, cursor) { case 'n': if err := validateNull(buf, cursor); err != nil { return 0, err } cursor += 4 return cursor, nil case '{': default: return 0, errors.ErrInvalidBeginningOfValue(char(b, cursor), cursor) } cursor++ cursor = skipWhiteSpace(buf, cursor) if buf[cursor] == '}' { cursor++ return cursor, nil } var ( seenFields map[int]struct{} seenFieldNum int ) firstWin := (ctx.Option.Flags & FirstWinOption) != 0 if firstWin { seenFields = make(map[int]struct{}, d.fieldUniqueNameNum) } for { c, field, err := d.keyDecoder(d, buf, cursor) if err != nil { return 0, err } cursor = skipWhiteSpace(buf, c) if char(b, cursor) != ':' { return 0, errors.ErrExpected("colon after object key", cursor) } cursor++ if cursor >= buflen { return 0, errors.ErrExpected("object value after colon", cursor) } if field != nil { if field.err != nil { return 0, field.err } if firstWin { if _, exists := seenFields[field.fieldIdx]; exists { c, err := skipValue(buf, cursor, depth) if err != nil { return 0, err } cursor = c } else { c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) if err != nil { return 0, err } cursor = c seenFieldNum++ if d.fieldUniqueNameNum <= seenFieldNum { return skipObject(buf, cursor, depth) } seenFields[field.fieldIdx] = struct{}{} } } else { c, err := field.dec.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+field.offset)) if err != nil { return 0, err } cursor = c } } else { c, err := skipValue(buf, cursor, depth) if err != nil { return 0, err } cursor = c } cursor = skipWhiteSpace(buf, cursor) if char(b, cursor) == '}' { cursor++ return cursor, nil } if char(b, cursor) != ',' { return 0, errors.ErrExpected("comma after object element", cursor) } cursor++ } } func (d *structDecoder) DecodePath(ctx 
*RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: struct decoder does not support decode path") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go
vendor/github.com/goccy/go-json/internal/decoder/unmarshal_json.go
package decoder import ( "context" "encoding/json" "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type unmarshalJSONDecoder struct { typ *runtime.Type structName string fieldName string } func newUnmarshalJSONDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalJSONDecoder { return &unmarshalJSONDecoder{ typ: typ, structName: structName, fieldName: fieldName, } } func (d *unmarshalJSONDecoder) annotateError(cursor int64, err error) { switch e := err.(type) { case *errors.UnmarshalTypeError: e.Struct = d.structName e.Field = d.fieldName case *errors.SyntaxError: e.Offset = cursor } } func (d *unmarshalJSONDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { s.skipWhiteSpace() start := s.cursor if err := s.skipValue(depth); err != nil { return err } src := s.buf[start:s.cursor] dst := make([]byte, len(src)) copy(dst, src) v := *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: d.typ, ptr: p, })) switch v := v.(type) { case unmarshalerContext: var ctx context.Context if (s.Option.Flags & ContextOption) != 0 { ctx = s.Option.Context } else { ctx = context.Background() } if err := v.UnmarshalJSON(ctx, dst); err != nil { d.annotateError(s.cursor, err) return err } case json.Unmarshaler: if err := v.UnmarshalJSON(dst); err != nil { d.annotateError(s.cursor, err) return err } } return nil } func (d *unmarshalJSONDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { buf := ctx.Buf cursor = skipWhiteSpace(buf, cursor) start := cursor end, err := skipValue(buf, cursor, depth) if err != nil { return 0, err } src := buf[start:end] dst := make([]byte, len(src)) copy(dst, src) v := *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: d.typ, ptr: p, })) if (ctx.Option.Flags & ContextOption) != 0 { if err := v.(unmarshalerContext).UnmarshalJSON(ctx.Option.Context, dst); err != nil { d.annotateError(cursor, err) return 0, err } } else { if err := 
v.(json.Unmarshaler).UnmarshalJSON(dst); err != nil { d.annotateError(cursor, err) return 0, err } } return end, nil } func (d *unmarshalJSONDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: unmarshal json decoder does not support decode path") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/interface.go
vendor/github.com/goccy/go-json/internal/decoder/interface.go
package decoder

import (
	"bytes"
	"encoding"
	"encoding/json"
	"reflect"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// interfaceDecoder decodes JSON into interface-typed destinations,
// dispatching on the first byte of the value.
type interfaceDecoder struct {
	typ           *runtime.Type
	structName    string
	fieldName     string
	sliceDecoder  *sliceDecoder
	mapDecoder    *mapDecoder
	floatDecoder  *floatDecoder
	numberDecoder *numberDecoder
	stringDecoder *stringDecoder
}

// newEmptyInterfaceDecoder builds a decoder for plain interface{} targets.
// The slice/map sub-decoders recurse into the same decoder instance.
func newEmptyInterfaceDecoder(structName, fieldName string) *interfaceDecoder {
	ifaceDecoder := &interfaceDecoder{
		typ:        emptyInterfaceType,
		structName: structName,
		fieldName:  fieldName,
		floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
			*(*interface{})(p) = v
		}),
		numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
			*(*interface{})(p) = v
		}),
		stringDecoder: newStringDecoder(structName, fieldName),
	}
	ifaceDecoder.sliceDecoder = newSliceDecoder(
		ifaceDecoder,
		emptyInterfaceType,
		emptyInterfaceType.Size(),
		structName, fieldName,
	)
	ifaceDecoder.mapDecoder = newMapDecoder(
		interfaceMapType,
		stringType,
		ifaceDecoder.stringDecoder,
		interfaceMapType.Elem(),
		ifaceDecoder,
		structName,
		fieldName,
	)
	return ifaceDecoder
}

// newInterfaceDecoder builds a decoder for a non-empty interface type;
// container elements fall back to an empty-interface decoder.
func newInterfaceDecoder(typ *runtime.Type, structName, fieldName string) *interfaceDecoder {
	emptyIfaceDecoder := newEmptyInterfaceDecoder(structName, fieldName)
	stringDecoder := newStringDecoder(structName, fieldName)
	return &interfaceDecoder{
		typ:        typ,
		structName: structName,
		fieldName:  fieldName,
		sliceDecoder: newSliceDecoder(
			emptyIfaceDecoder,
			emptyInterfaceType,
			emptyInterfaceType.Size(),
			structName, fieldName,
		),
		mapDecoder: newMapDecoder(
			interfaceMapType,
			stringType,
			stringDecoder,
			interfaceMapType.Elem(),
			emptyIfaceDecoder,
			structName,
			fieldName,
		),
		floatDecoder: newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
			*(*interface{})(p) = v
		}),
		numberDecoder: newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
			*(*interface{})(p) = v
		}),
		stringDecoder: stringDecoder,
	}
}

// numDecoder picks json.Number vs float64 decoding based on the stream option.
func (d *interfaceDecoder) numDecoder(s *Stream) Decoder {
	if s.UseNumber {
		return d.numberDecoder
	}
	return d.floatDecoder
}

var (
	emptyInterfaceType = runtime.Type2RType(reflect.TypeOf((*interface{})(nil)).Elem())
	EmptyInterfaceType = emptyInterfaceType
	interfaceMapType   = runtime.Type2RType(
		reflect.TypeOf((*map[string]interface{})(nil)).Elem(),
	)
	stringType = runtime.Type2RType(
		reflect.TypeOf(""),
	)
)

func decodeStreamUnmarshaler(s *Stream, depth int64, unmarshaler json.Unmarshaler) error {
	start := s.cursor
	if err := s.skipValue(depth); err != nil {
		return err
	}
	src := s.buf[start:s.cursor]
	dst := make([]byte, len(src))
	copy(dst, src)

	if err := unmarshaler.UnmarshalJSON(dst); err != nil {
		return err
	}
	return nil
}

func decodeStreamUnmarshalerContext(s *Stream, depth int64, unmarshaler unmarshalerContext) error {
	start := s.cursor
	if err := s.skipValue(depth); err != nil {
		return err
	}
	src := s.buf[start:s.cursor]
	dst := make([]byte, len(src))
	copy(dst, src)

	if err := unmarshaler.UnmarshalJSON(s.Option.Context, dst); err != nil {
		return err
	}
	return nil
}

func decodeUnmarshaler(buf []byte, cursor, depth int64, unmarshaler json.Unmarshaler) (int64, error) {
	cursor = skipWhiteSpace(buf, cursor)
	start := cursor
	end, err := skipValue(buf, cursor, depth)
	if err != nil {
		return 0, err
	}
	src := buf[start:end]
	dst := make([]byte, len(src))
	copy(dst, src)

	if err := unmarshaler.UnmarshalJSON(dst); err != nil {
		return 0, err
	}
	return end, nil
}

func decodeUnmarshalerContext(ctx *RuntimeContext, buf []byte, cursor, depth int64, unmarshaler unmarshalerContext) (int64, error) {
	cursor = skipWhiteSpace(buf, cursor)
	start := cursor
	end, err := skipValue(buf, cursor, depth)
	if err != nil {
		return 0, err
	}
	src := buf[start:end]
	dst := make([]byte, len(src))
	copy(dst, src)

	if err := unmarshaler.UnmarshalJSON(ctx.Option.Context, dst); err != nil {
		return 0, err
	}
	return end, nil
}

func decodeStreamTextUnmarshaler(s *Stream, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) error {
	start := s.cursor
	if err := s.skipValue(depth); err != nil {
		return err
	}
	src := s.buf[start:s.cursor]
	if bytes.Equal(src, nullbytes) {
		*(*unsafe.Pointer)(p) = nil
		return nil
	}

	dst := make([]byte, len(src))
	copy(dst, src)

	if err := unmarshaler.UnmarshalText(dst); err != nil {
		return err
	}
	return nil
}

func decodeTextUnmarshaler(buf []byte, cursor, depth int64, unmarshaler encoding.TextUnmarshaler, p unsafe.Pointer) (int64, error) {
	cursor = skipWhiteSpace(buf, cursor)
	start := cursor
	end, err := skipValue(buf, cursor, depth)
	if err != nil {
		return 0, err
	}
	src := buf[start:end]
	if bytes.Equal(src, nullbytes) {
		*(*unsafe.Pointer)(p) = nil
		return end, nil
	}
	if s, ok := unquoteBytes(src); ok {
		src = s
	}
	if err := unmarshaler.UnmarshalText(src); err != nil {
		return 0, err
	}
	return end, nil
}

// decodeStreamEmptyInterface decodes an arbitrary JSON value into an
// interface{} slot, choosing the concrete Go type from the first byte.
func (d *interfaceDecoder) decodeStreamEmptyInterface(s *Stream, depth int64, p unsafe.Pointer) error {
	c := s.skipWhiteSpace()
	for {
		switch c {
		case '{':
			var v map[string]interface{}
			ptr := unsafe.Pointer(&v)
			if err := d.mapDecoder.DecodeStream(s, depth, ptr); err != nil {
				return err
			}
			*(*interface{})(p) = v
			return nil
		case '[':
			var v []interface{}
			ptr := unsafe.Pointer(&v)
			if err := d.sliceDecoder.DecodeStream(s, depth, ptr); err != nil {
				return err
			}
			*(*interface{})(p) = v
			return nil
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return d.numDecoder(s).DecodeStream(s, depth, p)
		case '"':
			s.cursor++
			start := s.cursor
			for {
				switch s.char() {
				case '\\':
					if _, err := decodeEscapeString(s, nil); err != nil {
						return err
					}
				case '"':
					literal := s.buf[start:s.cursor]
					s.cursor++
					*(*interface{})(p) = string(literal)
					return nil
				case nul:
					if s.read() {
						continue
					}
					return errors.ErrUnexpectedEndOfJSON("string", s.totalOffset())
				}
				s.cursor++
			}
		case 't':
			if err := trueBytes(s); err != nil {
				return err
			}
			**(**interface{})(unsafe.Pointer(&p)) = true
			return nil
		case 'f':
			if err := falseBytes(s); err != nil {
				return err
			}
			**(**interface{})(unsafe.Pointer(&p)) = false
			return nil
		case 'n':
			if err := nullBytes(s); err != nil {
				return err
			}
			*(*interface{})(p) = nil
			return nil
		case nul:
			if s.read() {
				c = s.char()
				continue
			}
		}
		break
	}
	return errors.ErrInvalidBeginningOfValue(c, s.totalOffset())
}

// emptyInterface mirrors the runtime layout of an interface{} value.
type emptyInterface struct {
	typ *runtime.Type
	ptr unsafe.Pointer
}

func (d *interfaceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{
		typ: d.typ,
		ptr: p,
	}))
	rv := reflect.ValueOf(runtimeInterfaceValue)
	if rv.NumMethod() > 0 && rv.CanInterface() {
		// Target implements one of the unmarshaler interfaces: delegate.
		if u, ok := rv.Interface().(unmarshalerContext); ok {
			return decodeStreamUnmarshalerContext(s, depth, u)
		}
		if u, ok := rv.Interface().(json.Unmarshaler); ok {
			return decodeStreamUnmarshaler(s, depth, u)
		}
		if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
			return decodeStreamTextUnmarshaler(s, depth, u, p)
		}
		if s.skipWhiteSpace() == 'n' {
			if err := nullBytes(s); err != nil {
				return err
			}
			*(*interface{})(p) = nil
			return nil
		}
		return d.errUnmarshalType(rv.Type(), s.totalOffset())
	}
	iface := rv.Interface()
	ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface))
	typ := ifaceHeader.typ
	if ifaceHeader.ptr == nil || d.typ == typ || typ == nil {
		// concrete type is empty interface
		return d.decodeStreamEmptyInterface(s, depth, p)
	}
	if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr {
		return d.decodeStreamEmptyInterface(s, depth, p)
	}
	if s.skipWhiteSpace() == 'n' {
		if err := nullBytes(s); err != nil {
			return err
		}
		*(*interface{})(p) = nil
		return nil
	}
	decoder, err := CompileToGetDecoder(typ)
	if err != nil {
		return err
	}
	return decoder.DecodeStream(s, depth, ifaceHeader.ptr)
}

func (d *interfaceDecoder) errUnmarshalType(typ reflect.Type, offset int64) *errors.UnmarshalTypeError {
	return &errors.UnmarshalTypeError{
		Value:  typ.String(),
		Type:   typ,
		Offset: offset,
		Struct: d.structName,
		Field:  d.fieldName,
	}
}

func (d *interfaceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	runtimeInterfaceValue := *(*interface{})(unsafe.Pointer(&emptyInterface{
		typ: d.typ,
		ptr: p,
	}))
	rv := reflect.ValueOf(runtimeInterfaceValue)
	if rv.NumMethod() > 0 && rv.CanInterface() {
		if u, ok := rv.Interface().(unmarshalerContext); ok {
			return decodeUnmarshalerContext(ctx, buf, cursor, depth, u)
		}
		if u, ok := rv.Interface().(json.Unmarshaler); ok {
			return decodeUnmarshaler(buf, cursor, depth, u)
		}
		if u, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
			return decodeTextUnmarshaler(buf, cursor, depth, u, p)
		}
		cursor = skipWhiteSpace(buf, cursor)
		if buf[cursor] == 'n' {
			if err := validateNull(buf, cursor); err != nil {
				return 0, err
			}
			cursor += 4
			**(**interface{})(unsafe.Pointer(&p)) = nil
			return cursor, nil
		}
		return 0, d.errUnmarshalType(rv.Type(), cursor)
	}
	iface := rv.Interface()
	ifaceHeader := (*emptyInterface)(unsafe.Pointer(&iface))
	typ := ifaceHeader.typ
	if ifaceHeader.ptr == nil || d.typ == typ || typ == nil {
		// concrete type is empty interface
		return d.decodeEmptyInterface(ctx, cursor, depth, p)
	}
	if typ.Kind() == reflect.Ptr && typ.Elem() == d.typ || typ.Kind() != reflect.Ptr {
		return d.decodeEmptyInterface(ctx, cursor, depth, p)
	}
	cursor = skipWhiteSpace(buf, cursor)
	if buf[cursor] == 'n' {
		if err := validateNull(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4
		**(**interface{})(unsafe.Pointer(&p)) = nil
		return cursor, nil
	}
	decoder, err := CompileToGetDecoder(typ)
	if err != nil {
		return 0, err
	}
	return decoder.Decode(ctx, cursor, depth, ifaceHeader.ptr)
}

// decodeEmptyInterface is the buffer-based counterpart of
// decodeStreamEmptyInterface.
func (d *interfaceDecoder) decodeEmptyInterface(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	cursor = skipWhiteSpace(buf, cursor)
	switch buf[cursor] {
	case '{':
		var v map[string]interface{}
		ptr := unsafe.Pointer(&v)
		cursor, err := d.mapDecoder.Decode(ctx, cursor, depth, ptr)
		if err != nil {
			return 0, err
		}
		**(**interface{})(unsafe.Pointer(&p)) = v
		return cursor, nil
	case '[':
		var v []interface{}
		ptr := unsafe.Pointer(&v)
		cursor, err := d.sliceDecoder.Decode(ctx, cursor, depth, ptr)
		if err != nil {
			return 0, err
		}
		**(**interface{})(unsafe.Pointer(&p)) = v
		return cursor, nil
	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		return d.floatDecoder.Decode(ctx, cursor, depth, p)
	case '"':
		var v string
		ptr := unsafe.Pointer(&v)
		cursor, err := d.stringDecoder.Decode(ctx, cursor, depth, ptr)
		if err != nil {
			return 0, err
		}
		**(**interface{})(unsafe.Pointer(&p)) = v
		return cursor, nil
	case 't':
		if err := validateTrue(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4
		**(**interface{})(unsafe.Pointer(&p)) = true
		return cursor, nil
	case 'f':
		if err := validateFalse(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 5
		**(**interface{})(unsafe.Pointer(&p)) = false
		return cursor, nil
	case 'n':
		if err := validateNull(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4
		**(**interface{})(unsafe.Pointer(&p)) = nil
		return cursor, nil
	}
	return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
}

// NewPathDecoder returns an empty-interface decoder used for extracting
// raw sub-documents via path queries.
func NewPathDecoder() Decoder {
	ifaceDecoder := &interfaceDecoder{
		typ:        emptyInterfaceType,
		structName: "",
		fieldName:  "",
		floatDecoder: newFloatDecoder("", "", func(p unsafe.Pointer, v float64) {
			*(*interface{})(p) = v
		}),
		numberDecoder: newNumberDecoder("", "", func(p unsafe.Pointer, v json.Number) {
			*(*interface{})(p) = v
		}),
		stringDecoder: newStringDecoder("", ""),
	}
	ifaceDecoder.sliceDecoder = newSliceDecoder(
		ifaceDecoder,
		emptyInterfaceType,
		emptyInterfaceType.Size(),
		"", "",
	)
	ifaceDecoder.mapDecoder = newMapDecoder(
		interfaceMapType,
		stringType,
		ifaceDecoder.stringDecoder,
		interfaceMapType.Elem(),
		ifaceDecoder,
		"",
		"",
	)
	return ifaceDecoder
}

var (
	truebytes  = []byte("true")
	falsebytes = []byte("false")
)

// DecodePath returns the raw bytes of the addressed value(s).
func (d *interfaceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	buf := ctx.Buf
	cursor = skipWhiteSpace(buf, cursor)
	switch buf[cursor] {
	case '{':
		return d.mapDecoder.DecodePath(ctx, cursor, depth)
	case '[':
		return d.sliceDecoder.DecodePath(ctx, cursor, depth)
	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		return d.floatDecoder.DecodePath(ctx, cursor, depth)
	case '"':
		return d.stringDecoder.DecodePath(ctx, cursor, depth)
	case 't':
		if err := validateTrue(buf, cursor); err != nil {
			return nil, 0, err
		}
		cursor += 4
		return [][]byte{truebytes}, cursor, nil
	case 'f':
		if err := validateFalse(buf, cursor); err != nil {
			return nil, 0, err
		}
		cursor += 5
		return [][]byte{falsebytes}, cursor, nil
	case 'n':
		if err := validateNull(buf, cursor); err != nil {
			return nil, 0, err
		}
		cursor += 4
		return [][]byte{nullbytes}, cursor, nil
	}
	return nil, cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/uint.go
vendor/github.com/goccy/go-json/internal/decoder/uint.go
package decoder

import (
	"fmt"
	"math"
	"reflect"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// uintDecoder decodes JSON numbers into unsigned integer destinations.
type uintDecoder struct {
	typ        *runtime.Type
	kind       reflect.Kind
	op         func(unsafe.Pointer, uint64)
	structName string
	fieldName  string
}

// newUintDecoder returns a decoder for the given unsigned integer type;
// op stores the parsed value into the destination pointer.
func newUintDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, uint64)) *uintDecoder {
	return &uintDecoder{
		typ:        typ,
		kind:       typ.Kind(),
		op:         op,
		structName: structName,
		fieldName:  fieldName,
	}
}

func (d *uintDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError {
	return &errors.UnmarshalTypeError{
		Value:  fmt.Sprintf("number %s", string(buf)),
		Type:   runtime.RType2Type(d.typ),
		Offset: offset,
	}
}

var (
	pow10u64 = [...]uint64{
		1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09,
		1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
	}
	pow10u64Len = len(pow10u64)
)

// parseUint converts a run of ASCII digits to uint64.
//
// BUG FIX: the previous implementation only rejected inputs longer than
// 20 digits; 20-digit values above math.MaxUint64 (e.g.
// "18446744073709551616") silently wrapped around to a wrong small value.
// Both the per-digit multiplication and the running sum are now checked
// for overflow.
func (d *uintDecoder) parseUint(b []byte) (uint64, error) {
	maxDigit := len(b)
	if maxDigit > pow10u64Len {
		return 0, fmt.Errorf("invalid length of number")
	}
	sum := uint64(0)
	for i := 0; i < maxDigit; i++ {
		c := uint64(b[i]) - 48
		digitValue := pow10u64[maxDigit-i-1]
		n := c * digitValue
		// c*digitValue wrapped, or adding n would exceed MaxUint64.
		if (c != 0 && n/c != digitValue) || sum > math.MaxUint64-n {
			return 0, fmt.Errorf("invalid length of number")
		}
		sum += n
	}
	return sum, nil
}

// decodeStreamByte scans the next number token (or null) from the stream
// and returns its raw digits; a nil slice with nil error means JSON null.
func (d *uintDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
	for {
		switch s.char() {
		case ' ', '\n', '\t', '\r':
			s.cursor++
			continue
		case '0':
			s.cursor++
			return numZeroBuf, nil
		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
			start := s.cursor
			for {
				s.cursor++
				if numTable[s.char()] {
					continue
				} else if s.char() == nul {
					if s.read() {
						s.cursor-- // for retry current character
						continue
					}
				}
				break
			}
			num := s.buf[start:s.cursor]
			return num, nil
		case 'n':
			if err := nullBytes(s); err != nil {
				return nil, err
			}
			return nil, nil
		case nul:
			if s.read() {
				continue
			}
		default:
			return nil, d.typeError([]byte{s.char()}, s.totalOffset())
		}
		break
	}
	return nil, errors.ErrUnexpectedEndOfJSON("number(unsigned integer)", s.totalOffset())
}

// decodeByte is the buffer-based counterpart of decodeStreamByte.
func (d *uintDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
	for {
		switch buf[cursor] {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case '0':
			cursor++
			return numZeroBuf, cursor, nil
		case '1', '2', '3', '4', '5', '6', '7', '8', '9':
			start := cursor
			cursor++
			for numTable[buf[cursor]] {
				cursor++
			}
			num := buf[start:cursor]
			return num, cursor, nil
		case 'n':
			if err := validateNull(buf, cursor); err != nil {
				return nil, 0, err
			}
			cursor += 4
			return nil, cursor, nil
		default:
			return nil, 0, d.typeError([]byte{buf[cursor]}, cursor)
		}
	}
}

func (d *uintDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	bytes, err := d.decodeStreamByte(s)
	if err != nil {
		return err
	}
	if bytes == nil {
		// JSON null: leave the destination untouched.
		return nil
	}
	u64, err := d.parseUint(bytes)
	if err != nil {
		return d.typeError(bytes, s.totalOffset())
	}
	switch d.kind {
	case reflect.Uint8:
		if (1 << 8) <= u64 {
			return d.typeError(bytes, s.totalOffset())
		}
	case reflect.Uint16:
		if (1 << 16) <= u64 {
			return d.typeError(bytes, s.totalOffset())
		}
	case reflect.Uint32:
		if (1 << 32) <= u64 {
			return d.typeError(bytes, s.totalOffset())
		}
	}
	d.op(p, u64)
	return nil
}

func (d *uintDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	bytes, c, err := d.decodeByte(ctx.Buf, cursor)
	if err != nil {
		return 0, err
	}
	if bytes == nil {
		return c, nil
	}
	cursor = c
	u64, err := d.parseUint(bytes)
	if err != nil {
		return 0, d.typeError(bytes, cursor)
	}
	switch d.kind {
	case reflect.Uint8:
		if (1 << 8) <= u64 {
			return 0, d.typeError(bytes, cursor)
		}
	case reflect.Uint16:
		if (1 << 16) <= u64 {
			return 0, d.typeError(bytes, cursor)
		}
	case reflect.Uint32:
		if (1 << 32) <= u64 {
			return 0, d.typeError(bytes, cursor)
		}
	}
	d.op(p, u64)
	return cursor, nil
}

func (d *uintDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	return nil, 0, fmt.Errorf("json: uint decoder does not support decode path")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/array.go
vendor/github.com/goccy/go-json/internal/decoder/array.go
package decoder

import (
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// arrayDecoder decodes a JSON array into a fixed-length Go array.
// Extra JSON elements are skipped; missing trailing elements are zeroed.
type arrayDecoder struct {
	elemType     *runtime.Type
	size         uintptr
	valueDecoder Decoder
	alen         int
	structName   string
	fieldName    string
	zeroValue    unsafe.Pointer
}

func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder {
	// workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly.
	zeroValuePtr := unsafe_New(elemType)
	zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr))
	return &arrayDecoder{
		valueDecoder: dec,
		elemType:     elemType,
		size:         elemType.Size(),
		alen:         alen,
		structName:   structName,
		fieldName:    fieldName,
		zeroValue:    zeroValue,
	}
}

func (d *arrayDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	depth++
	if depth > maxDecodeNestingDepth {
		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
	}

	for {
		switch s.char() {
		case ' ', '\n', '\t', '\r':
		case 'n':
			if err := nullBytes(s); err != nil {
				return err
			}
			return nil
		case '[':
			idx := 0
			s.cursor++
			if s.skipWhiteSpace() == ']' {
				// Empty array: zero out every destination element.
				for idx < d.alen {
					*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
					idx++
				}
				s.cursor++
				return nil
			}
			for {
				if idx < d.alen {
					if err := d.valueDecoder.DecodeStream(s, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size)); err != nil {
						return err
					}
				} else {
					// Surplus JSON elements beyond the array length are discarded.
					if err := s.skipValue(depth); err != nil {
						return err
					}
				}
				idx++
				switch s.skipWhiteSpace() {
				case ']':
					for idx < d.alen {
						*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
						idx++
					}
					s.cursor++
					return nil
				case ',':
					s.cursor++
					continue
				case nul:
					if s.read() {
						s.cursor++
						continue
					}
					goto ERROR
				default:
					goto ERROR
				}
			}
		case nul:
			if s.read() {
				continue
			}
			goto ERROR
		default:
			goto ERROR
		}
		s.cursor++
	}
ERROR:
	return errors.ErrUnexpectedEndOfJSON("array", s.totalOffset())
}

func (d *arrayDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	depth++
	if depth > maxDecodeNestingDepth {
		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
	}

	for {
		switch buf[cursor] {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case 'n':
			if err := validateNull(buf, cursor); err != nil {
				return 0, err
			}
			cursor += 4
			return cursor, nil
		case '[':
			idx := 0
			cursor++
			cursor = skipWhiteSpace(buf, cursor)
			if buf[cursor] == ']' {
				for idx < d.alen {
					*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
					idx++
				}
				cursor++
				return cursor, nil
			}
			for {
				if idx < d.alen {
					c, err := d.valueDecoder.Decode(ctx, cursor, depth, unsafe.Pointer(uintptr(p)+uintptr(idx)*d.size))
					if err != nil {
						return 0, err
					}
					cursor = c
				} else {
					c, err := skipValue(buf, cursor, depth)
					if err != nil {
						return 0, err
					}
					cursor = c
				}
				idx++
				cursor = skipWhiteSpace(buf, cursor)
				switch buf[cursor] {
				case ']':
					for idx < d.alen {
						*(*unsafe.Pointer)(unsafe.Pointer(uintptr(p) + uintptr(idx)*d.size)) = d.zeroValue
						idx++
					}
					cursor++
					return cursor, nil
				case ',':
					cursor++
					continue
				default:
					return 0, errors.ErrInvalidCharacter(buf[cursor], "array", cursor)
				}
			}
		default:
			return 0, errors.ErrUnexpectedEndOfJSON("array", cursor)
		}
	}
}

func (d *arrayDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	return nil, 0, fmt.Errorf("json: array decoder does not support decode path")
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/map.go
vendor/github.com/goccy/go-json/internal/decoder/map.go
package decoder

import (
	"reflect"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// mapDecoder decodes a JSON object into a Go map, optionally using the
// runtime's string-key fast path for assignment.
type mapDecoder struct {
	mapType                 *runtime.Type
	keyType                 *runtime.Type
	valueType               *runtime.Type
	canUseAssignFaststrType bool
	keyDecoder              Decoder
	valueDecoder            Decoder
	structName              string
	fieldName               string
}

func newMapDecoder(mapType *runtime.Type, keyType *runtime.Type, keyDec Decoder, valueType *runtime.Type, valueDec Decoder, structName, fieldName string) *mapDecoder {
	return &mapDecoder{
		mapType:                 mapType,
		keyDecoder:              keyDec,
		keyType:                 keyType,
		canUseAssignFaststrType: canUseAssignFaststrType(keyType, valueType),
		valueType:               valueType,
		valueDecoder:            valueDec,
		structName:              structName,
		fieldName:               fieldName,
	}
}

const (
	mapMaxElemSize = 128
)

// See detail: https://github.com/goccy/go-json/pull/283
func canUseAssignFaststrType(key *runtime.Type, value *runtime.Type) bool {
	indirectElem := value.Size() > mapMaxElemSize
	if indirectElem {
		return false
	}
	return key.Kind() == reflect.String
}

//go:linkname makemap reflect.makemap
func makemap(*runtime.Type, int) unsafe.Pointer

//nolint:golint
//go:linkname mapassign_faststr runtime.mapassign_faststr
//go:noescape
func mapassign_faststr(t *runtime.Type, m unsafe.Pointer, s string) unsafe.Pointer

//go:linkname mapassign reflect.mapassign
//go:noescape
func mapassign(t *runtime.Type, m unsafe.Pointer, k, v unsafe.Pointer)

// mapassign stores (k, v) into m, preferring the string fast path when legal.
func (d *mapDecoder) mapassign(t *runtime.Type, m, k, v unsafe.Pointer) {
	if d.canUseAssignFaststrType {
		mapV := mapassign_faststr(t, m, *(*string)(k))
		typedmemmove(d.valueType, mapV, v)
	} else {
		mapassign(t, m, k, v)
	}
}

func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	depth++
	if depth > maxDecodeNestingDepth {
		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
	}

	switch s.skipWhiteSpace() {
	case 'n':
		if err := nullBytes(s); err != nil {
			return err
		}
		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil
		return nil
	case '{':
	default:
		return errors.ErrExpected("{ character for map value", s.totalOffset())
	}
	mapValue := *(*unsafe.Pointer)(p)
	if mapValue == nil {
		mapValue = makemap(d.mapType, 0)
	}
	s.cursor++
	if s.skipWhiteSpace() == '}' {
		*(*unsafe.Pointer)(p) = mapValue
		s.cursor++
		return nil
	}
	for {
		k := unsafe_New(d.keyType)
		if err := d.keyDecoder.DecodeStream(s, depth, k); err != nil {
			return err
		}
		s.skipWhiteSpace()
		if !s.equalChar(':') {
			return errors.ErrExpected("colon after object key", s.totalOffset())
		}
		s.cursor++
		v := unsafe_New(d.valueType)
		if err := d.valueDecoder.DecodeStream(s, depth, v); err != nil {
			return err
		}
		d.mapassign(d.mapType, mapValue, k, v)
		s.skipWhiteSpace()
		if s.equalChar('}') {
			**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
			s.cursor++
			return nil
		}
		if !s.equalChar(',') {
			return errors.ErrExpected("comma after object value", s.totalOffset())
		}
		s.cursor++
	}
}

func (d *mapDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	depth++
	if depth > maxDecodeNestingDepth {
		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
	}

	cursor = skipWhiteSpace(buf, cursor)
	buflen := int64(len(buf))
	if buflen < 2 {
		return 0, errors.ErrExpected("{} for map", cursor)
	}
	switch buf[cursor] {
	case 'n':
		if err := validateNull(buf, cursor); err != nil {
			return 0, err
		}
		cursor += 4
		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = nil
		return cursor, nil
	case '{':
	default:
		return 0, errors.ErrExpected("{ character for map value", cursor)
	}
	cursor++
	cursor = skipWhiteSpace(buf, cursor)
	mapValue := *(*unsafe.Pointer)(p)
	if mapValue == nil {
		mapValue = makemap(d.mapType, 0)
	}
	if buf[cursor] == '}' {
		**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
		cursor++
		return cursor, nil
	}
	for {
		k := unsafe_New(d.keyType)
		keyCursor, err := d.keyDecoder.Decode(ctx, cursor, depth, k)
		if err != nil {
			return 0, err
		}
		cursor = skipWhiteSpace(buf, keyCursor)
		if buf[cursor] != ':' {
			return 0, errors.ErrExpected("colon after object key", cursor)
		}
		cursor++
		v := unsafe_New(d.valueType)
		valueCursor, err := d.valueDecoder.Decode(ctx, cursor, depth, v)
		if err != nil {
			return 0, err
		}
		d.mapassign(d.mapType, mapValue, k, v)
		cursor = skipWhiteSpace(buf, valueCursor)
		if buf[cursor] == '}' {
			**(**unsafe.Pointer)(unsafe.Pointer(&p)) = mapValue
			cursor++
			return cursor, nil
		}
		if buf[cursor] != ',' {
			return 0, errors.ErrExpected("comma after object value", cursor)
		}
		cursor++
	}
}

// DecodePath walks the object, returning raw bytes for keys matched by
// the configured path. Only string-keyed maps are supported.
func (d *mapDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	buf := ctx.Buf
	depth++
	if depth > maxDecodeNestingDepth {
		return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
	}

	cursor = skipWhiteSpace(buf, cursor)
	buflen := int64(len(buf))
	if buflen < 2 {
		return nil, 0, errors.ErrExpected("{} for map", cursor)
	}
	switch buf[cursor] {
	case 'n':
		if err := validateNull(buf, cursor); err != nil {
			return nil, 0, err
		}
		cursor += 4
		return [][]byte{nullbytes}, cursor, nil
	case '{':
	default:
		return nil, 0, errors.ErrExpected("{ character for map value", cursor)
	}
	cursor++
	cursor = skipWhiteSpace(buf, cursor)
	if buf[cursor] == '}' {
		cursor++
		return nil, cursor, nil
	}
	keyDecoder, ok := d.keyDecoder.(*stringDecoder)
	if !ok {
		return nil, 0, &errors.UnmarshalTypeError{
			Value:  "string",
			Type:   reflect.TypeOf(""),
			Offset: cursor,
			Struct: d.structName,
			Field:  d.fieldName,
		}
	}
	ret := [][]byte{}
	for {
		key, keyCursor, err := keyDecoder.decodeByte(buf, cursor)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(buf, keyCursor)
		if buf[cursor] != ':' {
			return nil, 0, errors.ErrExpected("colon after object key", cursor)
		}
		cursor++
		child, found, err := ctx.Option.Path.Field(string(key))
		if err != nil {
			return nil, 0, err
		}
		if found {
			if child != nil {
				// Descend into the matched child path.
				oldPath := ctx.Option.Path.node
				ctx.Option.Path.node = child
				paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth)
				if err != nil {
					return nil, 0, err
				}
				ctx.Option.Path.node = oldPath
				ret = append(ret, paths...)
				cursor = c
			} else {
				// Leaf match: capture the raw value bytes.
				start := cursor
				end, err := skipValue(buf, cursor, depth)
				if err != nil {
					return nil, 0, err
				}
				ret = append(ret, buf[start:end])
				cursor = end
			}
		} else {
			c, err := skipValue(buf, cursor, depth)
			if err != nil {
				return nil, 0, err
			}
			cursor = c
		}
		cursor = skipWhiteSpace(buf, cursor)
		if buf[cursor] == '}' {
			cursor++
			return ret, cursor, nil
		}
		if buf[cursor] != ',' {
			return nil, 0, errors.ErrExpected("comma after object value", cursor)
		}
		cursor++
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/compile.go
vendor/github.com/goccy/go-json/internal/decoder/compile.go
package decoder

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"unicode"
	"unsafe"

	"github.com/goccy/go-json/internal/runtime"
)

var (
	jsonNumberType   = reflect.TypeOf(json.Number(""))
	typeAddr         *runtime.TypeAddr
	cachedDecoderMap unsafe.Pointer // map[uintptr]decoder
	cachedDecoder    []Decoder
	initOnce         sync.Once
)

// initDecoder lazily sets up the type-address-indexed decoder cache.
// When runtime.AnalyzeTypeAddr fails, an empty TypeAddr is used so the
// slow-path map cache handles every type.
func initDecoder() {
	initOnce.Do(func() {
		typeAddr = runtime.AnalyzeTypeAddr()
		if typeAddr == nil {
			typeAddr = &runtime.TypeAddr{}
		}
		cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1)
	})
}

// loadDecoderMap returns the current snapshot of the copy-on-write
// decoder map (may be nil before the first store).
func loadDecoderMap() map[uintptr]Decoder {
	initDecoder()
	p := atomic.LoadPointer(&cachedDecoderMap)
	return *(*map[uintptr]Decoder)(unsafe.Pointer(&p))
}

// storeDecoder publishes dec under typ by copying m into a fresh map and
// atomically swapping it in (copy-on-write; concurrent readers are safe).
func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) {
	initDecoder()
	newDecoderMap := make(map[uintptr]Decoder, len(m)+1)
	newDecoderMap[typ] = dec
	for k, v := range m {
		newDecoderMap[k] = v
	}
	atomic.StorePointer(&cachedDecoderMap, *(*unsafe.Pointer)(unsafe.Pointer(&newDecoderMap)))
}

// compileToGetDecoderSlowPath serves types whose address falls outside the
// analyzed range: look up the map cache, compile on miss, then publish.
// Concurrent compiles of the same type may race benignly (last store wins).
func compileToGetDecoderSlowPath(typeptr uintptr, typ *runtime.Type) (Decoder, error) {
	decoderMap := loadDecoderMap()
	if dec, exists := decoderMap[typeptr]; exists {
		return dec, nil
	}
	dec, err := compileHead(typ, map[uintptr]Decoder{})
	if err != nil {
		return nil, err
	}
	storeDecoder(typeptr, dec, decoderMap)
	return dec, nil
}

// compileHead compiles the decoder for a top-level destination. typ is the
// pointer type passed to Unmarshal; custom unmarshalers are checked first,
// otherwise the pointed-to element type is compiled.
func compileHead(typ *runtime.Type, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	switch {
	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
		return newUnmarshalJSONDecoder(runtime.PtrTo(typ), "", ""), nil
	case runtime.PtrTo(typ).Implements(unmarshalTextType):
		return newUnmarshalTextDecoder(runtime.PtrTo(typ), "", ""), nil
	}
	return compile(typ.Elem(), "", "", structTypeToDecoder)
}

// compile dispatches on typ's reflect.Kind to the matching decoder
// constructor. structTypeToDecoder carries in-progress struct decoders so
// recursive types terminate. Unsupported kinds get an invalid decoder that
// errors at decode time.
func compile(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	switch {
	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
		return newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName), nil
	case runtime.PtrTo(typ).Implements(unmarshalTextType):
		return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil
	}
	switch typ.Kind() {
	case reflect.Ptr:
		return compilePtr(typ, structName, fieldName, structTypeToDecoder)
	case reflect.Struct:
		return compileStruct(typ, structName, fieldName, structTypeToDecoder)
	case reflect.Slice:
		elem := typ.Elem()
		if elem.Kind() == reflect.Uint8 {
			// []byte gets the dedicated base64/bytes decoder.
			return compileBytes(elem, structName, fieldName)
		}
		return compileSlice(typ, structName, fieldName, structTypeToDecoder)
	case reflect.Array:
		return compileArray(typ, structName, fieldName, structTypeToDecoder)
	case reflect.Map:
		return compileMap(typ, structName, fieldName, structTypeToDecoder)
	case reflect.Interface:
		return compileInterface(typ, structName, fieldName)
	case reflect.Uintptr:
		return compileUint(typ, structName, fieldName)
	case reflect.Int:
		return compileInt(typ, structName, fieldName)
	case reflect.Int8:
		return compileInt8(typ, structName, fieldName)
	case reflect.Int16:
		return compileInt16(typ, structName, fieldName)
	case reflect.Int32:
		return compileInt32(typ, structName, fieldName)
	case reflect.Int64:
		return compileInt64(typ, structName, fieldName)
	case reflect.Uint:
		return compileUint(typ, structName, fieldName)
	case reflect.Uint8:
		return compileUint8(typ, structName, fieldName)
	case reflect.Uint16:
		return compileUint16(typ, structName, fieldName)
	case reflect.Uint32:
		return compileUint32(typ, structName, fieldName)
	case reflect.Uint64:
		return compileUint64(typ, structName, fieldName)
	case reflect.String:
		return compileString(typ, structName, fieldName)
	case reflect.Bool:
		return compileBool(structName, fieldName)
	case reflect.Float32:
		return compileFloat32(structName, fieldName)
	case reflect.Float64:
		return compileFloat64(structName, fieldName)
	case reflect.Func:
		return compileFunc(typ, structName, fieldName)
	}
	return newInvalidDecoder(typ, structName, fieldName), nil
}

// isStringTagSupportedType reports whether the `,string` struct tag option
// applies to typ: scalar kinds only, and never custom unmarshalers.
func isStringTagSupportedType(typ *runtime.Type) bool {
	switch {
	case implementsUnmarshalJSONType(runtime.PtrTo(typ)):
		return false
	case runtime.PtrTo(typ).Implements(unmarshalTextType):
		return false
	}
	switch typ.Kind() {
	case reflect.Map:
		return false
	case reflect.Slice:
		return false
	case reflect.Array:
		return false
	case reflect.Struct:
		return false
	case reflect.Interface:
		return false
	}
	return true
}

// compileMapKey compiles the decoder for a map key type. JSON object keys
// are strings, so non-string keys are unwrapped through pointers and, for
// bool/int/uint/number kinds, wrapped so the quoted form is accepted;
// anything else is an invalid key type.
func compileMapKey(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	if runtime.PtrTo(typ).Implements(unmarshalTextType) {
		return newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName), nil
	}
	if typ.Kind() == reflect.String {
		return newStringDecoder(structName, fieldName), nil
	}
	dec, err := compile(typ, structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	for {
		switch t := dec.(type) {
		case *stringDecoder, *interfaceDecoder:
			return dec, nil
		case *boolDecoder, *intDecoder, *uintDecoder, *numberDecoder:
			return newWrappedStringDecoder(typ, dec, structName, fieldName), nil
		case *ptrDecoder:
			// Peel pointer layers and retry on the element decoder.
			dec = t.dec
		default:
			return newInvalidDecoder(typ, structName, fieldName), nil
		}
	}
}

// compilePtr wraps the element decoder so null/allocation is handled.
func compilePtr(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	dec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	return newPtrDecoder(dec, typ.Elem(), structName, fieldName), nil
}

// The compileInt*/compileUint*/compileFloat* constructors below differ only
// in the store callback that narrows the parsed value to the target width.

func compileInt(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
		*(*int)(p) = int(v)
	}), nil
}

func compileInt8(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
		*(*int8)(p) = int8(v)
	}), nil
}

func compileInt16(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
		*(*int16)(p) = int16(v)
	}), nil
}

func compileInt32(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
		*(*int32)(p) = int32(v)
	}), nil
}

func compileInt64(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newIntDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v int64) {
		*(*int64)(p) = v
	}), nil
}

func compileUint(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
		*(*uint)(p) = uint(v)
	}), nil
}

func compileUint8(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
		*(*uint8)(p) = uint8(v)
	}), nil
}

func compileUint16(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
		*(*uint16)(p) = uint16(v)
	}), nil
}

func compileUint32(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
		*(*uint32)(p) = uint32(v)
	}), nil
}

func compileUint64(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newUintDecoder(typ, structName, fieldName, func(p unsafe.Pointer, v uint64) {
		*(*uint64)(p) = v
	}), nil
}

func compileFloat32(structName, fieldName string) (Decoder, error) {
	return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
		*(*float32)(p) = float32(v)
	}), nil
}

func compileFloat64(structName, fieldName string) (Decoder, error) {
	return newFloatDecoder(structName, fieldName, func(p unsafe.Pointer, v float64) {
		*(*float64)(p) = v
	}), nil
}

// compileString special-cases json.Number (kind string) before falling back
// to the plain string decoder.
func compileString(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	if typ == runtime.Type2RType(jsonNumberType) {
		return newNumberDecoder(structName, fieldName, func(p unsafe.Pointer, v json.Number) {
			*(*json.Number)(p) = v
		}), nil
	}
	return newStringDecoder(structName, fieldName), nil
}

func compileBool(structName, fieldName string) (Decoder, error) {
	return newBoolDecoder(structName, fieldName), nil
}

func compileBytes(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newBytesDecoder(typ, structName, fieldName), nil
}

func compileSlice(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	elem := typ.Elem()
	decoder, err := compile(elem, structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	return newSliceDecoder(decoder, elem, elem.Size(), structName, fieldName), nil
}

func compileArray(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	elem := typ.Elem()
	decoder, err := compile(elem, structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	return newArrayDecoder(decoder, elem, typ.Len(), structName, fieldName), nil
}

func compileMap(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	keyDec, err := compileMapKey(typ.Key(), structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	valueDec, err := compile(typ.Elem(), structName, fieldName, structTypeToDecoder)
	if err != nil {
		return nil, err
	}
	return newMapDecoder(typ, typ.Key(), keyDec, typ.Elem(), valueDec, structName, fieldName), nil
}

func compileInterface(typ *runtime.Type, structName, fieldName string) (Decoder, error) {
	return newInterfaceDecoder(typ, structName, fieldName), nil
}

// compileFunc returns the func decoder (decode always fails unless null).
// NOTE: parameter is spelled "strutName" in the vendored upstream; left
// as-is to keep this vendored file byte-comparable.
func compileFunc(typ *runtime.Type, strutName, fieldName string) (Decoder, error) {
	return newFuncDecoder(typ, strutName, fieldName), nil
}

// typeToStructTags collects the json struct tags of typ's non-ignored fields.
func typeToStructTags(typ *runtime.Type) runtime.StructTags {
	tags := runtime.StructTags{}
	fieldNum := typ.NumField()
	for i := 0; i < fieldNum; i++ {
		field := typ.Field(i)
		if runtime.IsIgnoredStructField(field) {
			continue
		}
		tags = append(tags, runtime.StructTagFromField(field))
	}
	return tags
}

// compileStruct builds a structDecoder for typ. The decoder is registered in
// structTypeToDecoder before compiling fields so self-referential types
// resolve to the in-progress decoder; it is removed again before returning.
// Anonymous (embedded) fields are flattened into the parent's field map,
// with explicit tags on the outer struct taking precedence, and embedded
// pointers to unexported structs recording a deferred error. Lower-cased
// keys are also registered for case-insensitive fallback (first one wins).
func compileStruct(typ *runtime.Type, structName, fieldName string, structTypeToDecoder map[uintptr]Decoder) (Decoder, error) {
	fieldNum := typ.NumField()
	fieldMap := map[string]*structFieldSet{}
	typeptr := uintptr(unsafe.Pointer(typ))
	if dec, exists := structTypeToDecoder[typeptr]; exists {
		return dec, nil
	}
	structDec := newStructDecoder(structName, fieldName, fieldMap)
	structTypeToDecoder[typeptr] = structDec
	structName = typ.Name()
	tags := typeToStructTags(typ)
	allFields := []*structFieldSet{}
	for i := 0; i < fieldNum; i++ {
		field := typ.Field(i)
		if runtime.IsIgnoredStructField(field) {
			continue
		}
		isUnexportedField := unicode.IsLower([]rune(field.Name)[0])
		tag := runtime.StructTagFromField(field)
		dec, err := compile(runtime.Type2RType(field.Type), structName, field.Name, structTypeToDecoder)
		if err != nil {
			return nil, err
		}
		if field.Anonymous && !tag.IsTaggedKey {
			// Embedded field without an explicit tag: flatten its fields.
			if stDec, ok := dec.(*structDecoder); ok {
				if runtime.Type2RType(field.Type) == typ {
					// recursive definition
					continue
				}
				for k, v := range stDec.fieldMap {
					if tags.ExistsKey(k) {
						// Outer struct declares this key itself; outer wins.
						continue
					}
					fieldSet := &structFieldSet{
						dec:         v.dec,
						offset:      field.Offset + v.offset,
						isTaggedKey: v.isTaggedKey,
						key:         k,
						keyLen:      int64(len(k)),
					}
					allFields = append(allFields, fieldSet)
				}
			} else if pdec, ok := dec.(*ptrDecoder); ok {
				contentDec := pdec.contentDecoder()
				if pdec.typ == typ {
					// recursive definition
					continue
				}
				var fieldSetErr error
				if isUnexportedField {
					// Error is deferred to decode time: assigning through an
					// embedded pointer to an unexported struct is illegal.
					fieldSetErr = fmt.Errorf(
						"json: cannot set embedded pointer to unexported struct: %v",
						field.Type.Elem(),
					)
				}
				if dec, ok := contentDec.(*structDecoder); ok {
					for k, v := range dec.fieldMap {
						if tags.ExistsKey(k) {
							continue
						}
						fieldSet := &structFieldSet{
							dec:         newAnonymousFieldDecoder(pdec.typ, v.offset, v.dec),
							offset:      field.Offset,
							isTaggedKey: v.isTaggedKey,
							key:         k,
							keyLen:      int64(len(k)),
							err:         fieldSetErr,
						}
						allFields = append(allFields, fieldSet)
					}
				} else {
					fieldSet := &structFieldSet{
						dec:         pdec,
						offset:      field.Offset,
						isTaggedKey: tag.IsTaggedKey,
						key:         field.Name,
						keyLen:      int64(len(field.Name)),
					}
					allFields = append(allFields, fieldSet)
				}
			} else {
				fieldSet := &structFieldSet{
					dec:         dec,
					offset:      field.Offset,
					isTaggedKey: tag.IsTaggedKey,
					key:         field.Name,
					keyLen:      int64(len(field.Name)),
				}
				allFields = append(allFields, fieldSet)
			}
		} else {
			if tag.IsString && isStringTagSupportedType(runtime.Type2RType(field.Type)) {
				// `,string` option: accept the value in quoted form.
				dec = newWrappedStringDecoder(runtime.Type2RType(field.Type), dec, structName, field.Name)
			}
			var key string
			if tag.Key != "" {
				key = tag.Key
			} else {
				key = field.Name
			}
			fieldSet := &structFieldSet{
				dec:         dec,
				offset:      field.Offset,
				isTaggedKey: tag.IsTaggedKey,
				key:         key,
				keyLen:      int64(len(key)),
			}
			allFields = append(allFields, fieldSet)
		}
	}
	for _, set := range filterDuplicatedFields(allFields) {
		fieldMap[set.key] = set
		lower := strings.ToLower(set.key)
		if _, exists := fieldMap[lower]; !exists {
			// first win
			fieldMap[lower] = set
		}
	}
	delete(structTypeToDecoder, typeptr)
	structDec.tryOptimize()
	return structDec, nil
}

// filterDuplicatedFields drops every field whose key, after tag-based
// disambiguation, is still claimed by more than one field (matching
// encoding/json's conflict rule: ambiguous keys are ignored entirely).
func filterDuplicatedFields(allFields []*structFieldSet) []*structFieldSet {
	fieldMap := map[string][]*structFieldSet{}
	for _, field := range allFields {
		fieldMap[field.key] = append(fieldMap[field.key], field)
	}
	duplicatedFieldMap := map[string]struct{}{}
	for k, sets := range fieldMap {
		sets = filterFieldSets(sets)
		if len(sets) != 1 {
			duplicatedFieldMap[k] = struct{}{}
		}
	}
	filtered := make([]*structFieldSet, 0, len(allFields))
	for _, field := range allFields {
		if _, exists := duplicatedFieldMap[field.key]; exists {
			continue
		}
		filtered = append(filtered, field)
	}
	return filtered
}

// filterFieldSets resolves a key conflict in favor of explicitly tagged
// fields; an untagged field only survives when it is the sole claimant.
func filterFieldSets(sets []*structFieldSet) []*structFieldSet {
	if len(sets) == 1 {
		return sets
	}
	filtered := make([]*structFieldSet, 0, len(sets))
	for _, set := range sets {
		if set.isTaggedKey {
			filtered = append(filtered, set)
		}
	}
	return filtered
}

// implementsUnmarshalJSONType reports whether typ implements either the
// standard json.Unmarshaler or the context-aware variant.
func implementsUnmarshalJSONType(typ *runtime.Type) bool {
	return typ.Implements(unmarshalJSONType) || typ.Implements(unmarshalJSONContextType)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/slice.go
vendor/github.com/goccy/go-json/internal/decoder/slice.go
package decoder

import (
	"reflect"
	"sync"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

var (
	sliceType = runtime.Type2RType(
		reflect.TypeOf((*sliceHeader)(nil)).Elem(),
	)
	// nilSlice is memmove'd over the destination when decoding JSON null.
	nilSlice = unsafe.Pointer(&sliceHeader{})
)

// sliceDecoder decodes a JSON array into a Go slice, pooling scratch
// backing arrays to reduce allocation.
type sliceDecoder struct {
	elemType          *runtime.Type
	isElemPointerType bool // element is Ptr or Map: zeroed in place instead of memmoved
	valueDecoder      Decoder
	size              uintptr // element size in bytes
	arrayPool         sync.Pool
	structName        string
	fieldName         string
}

// If use reflect.SliceHeader, data type is uintptr.
// In this case, Go compiler cannot trace reference created by newArray().
// So, define using unsafe.Pointer as data type
type sliceHeader struct {
	data unsafe.Pointer
	len  int
	cap  int
}

const (
	defaultSliceCapacity = 2
)

func newSliceDecoder(dec Decoder, elemType *runtime.Type, size uintptr, structName, fieldName string) *sliceDecoder {
	return &sliceDecoder{
		valueDecoder:      dec,
		elemType:          elemType,
		isElemPointerType: elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map,
		size:              size,
		arrayPool: sync.Pool{
			New: func() interface{} {
				return &sliceHeader{
					data: newArray(elemType, defaultSliceCapacity),
					len:  0,
					cap:  defaultSliceCapacity,
				}
			},
		},
		structName: structName,
		fieldName:  fieldName,
	}
}

// newSlice takes a pooled scratch header and seeds it with src's elements
// (growing if the pooled capacity is too small), so existing destination
// elements are preserved while decoding.
func (d *sliceDecoder) newSlice(src *sliceHeader) *sliceHeader {
	slice := d.arrayPool.Get().(*sliceHeader)
	if src.len > 0 {
		// copy original elem
		if slice.cap < src.cap {
			data := newArray(d.elemType, src.cap)
			slice = &sliceHeader{data: data, len: src.len, cap: src.cap}
		} else {
			slice.len = src.len
		}
		copySlice(d.elemType, *slice, *src)
	} else {
		slice.len = 0
	}
	return slice
}

func (d *sliceDecoder) releaseSlice(p *sliceHeader) {
	d.arrayPool.Put(p)
}

//go:linkname copySlice reflect.typedslicecopy
func copySlice(elemType *runtime.Type, dst, src sliceHeader) int

//go:linkname newArray reflect.unsafe_NewArray
func newArray(*runtime.Type, int) unsafe.Pointer

//go:linkname typedmemmove reflect.typedmemmove
func typedmemmove(t *runtime.Type, dst, src unsafe.Pointer)

// errNumber builds the type error reported when a bare number appears
// where an array was expected.
func (d *sliceDecoder) errNumber(offset int64) *errors.UnmarshalTypeError {
	return &errors.UnmarshalTypeError{
		Value:  "number",
		Type:   reflect.SliceOf(runtime.RType2Type(d.elemType)),
		Struct: d.structName,
		Field:  d.fieldName,
		Offset: offset,
	}
}

// DecodeStream decodes one JSON array from the stream into the slice at p.
// Elements accumulate in a pooled scratch buffer (doubling capacity as
// needed) and are copied into the destination only on ']'.
func (d *sliceDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	depth++
	if depth > maxDecodeNestingDepth {
		return errors.ErrExceededMaxDepth(s.char(), s.cursor)
	}

	for {
		switch s.char() {
		case ' ', '\n', '\t', '\r':
			s.cursor++
			continue
		case 'n':
			if err := nullBytes(s); err != nil {
				return err
			}
			// null resets the destination to a nil slice.
			typedmemmove(sliceType, p, nilSlice)
			return nil
		case '[':
			s.cursor++
			if s.skipWhiteSpace() == ']' {
				// Empty array: keep existing backing storage if any.
				dst := (*sliceHeader)(p)
				if dst.data == nil {
					dst.data = newArray(d.elemType, 0)
				} else {
					dst.len = 0
				}
				s.cursor++
				return nil
			}
			idx := 0
			slice := d.newSlice((*sliceHeader)(p))
			srcLen := slice.len
			capacity := slice.cap
			data := slice.data
			for {
				if capacity <= idx {
					// Grow the scratch array by doubling.
					src := sliceHeader{data: data, len: idx, cap: capacity}
					capacity *= 2
					data = newArray(d.elemType, capacity)
					dst := sliceHeader{data: data, len: idx, cap: capacity}
					copySlice(d.elemType, dst, src)
				}
				ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size)
				// if srcLen is greater than idx, keep the original reference
				if srcLen <= idx {
					if d.isElemPointerType {
						**(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer
					} else {
						// assign new element to the slice
						typedmemmove(d.elemType, ep, unsafe_New(d.elemType))
					}
				}
				if err := d.valueDecoder.DecodeStream(s, depth, ep); err != nil {
					return err
				}
				s.skipWhiteSpace()
			RETRY:
				switch s.char() {
				case ']':
					// Commit: size destination, copy scratch in, recycle scratch.
					slice.cap = capacity
					slice.len = idx + 1
					slice.data = data
					dst := (*sliceHeader)(p)
					dst.len = idx + 1
					if dst.len > dst.cap {
						dst.data = newArray(d.elemType, dst.len)
						dst.cap = dst.len
					}
					copySlice(d.elemType, *dst, *slice)
					d.releaseSlice(slice)
					s.cursor++
					return nil
				case ',':
					idx++
				case nul:
					if s.read() {
						// More input became available; re-inspect the char.
						goto RETRY
					}
					slice.cap = capacity
					slice.data = data
					d.releaseSlice(slice)
					goto ERROR
				default:
					slice.cap = capacity
					slice.data = data
					d.releaseSlice(slice)
					goto ERROR
				}
				s.cursor++
			}
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return d.errNumber(s.totalOffset())
		case nul:
			if s.read() {
				continue
			}
			goto ERROR
		default:
			goto ERROR
		}
	}
ERROR:
	return errors.ErrUnexpectedEndOfJSON("slice", s.totalOffset())
}

// Decode is the in-memory-buffer counterpart of DecodeStream; it returns
// the cursor position after the closing ']'.
func (d *sliceDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	depth++
	if depth > maxDecodeNestingDepth {
		return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
	}

	for {
		switch buf[cursor] {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case 'n':
			if err := validateNull(buf, cursor); err != nil {
				return 0, err
			}
			cursor += 4
			typedmemmove(sliceType, p, nilSlice)
			return cursor, nil
		case '[':
			cursor++
			cursor = skipWhiteSpace(buf, cursor)
			if buf[cursor] == ']' {
				dst := (*sliceHeader)(p)
				if dst.data == nil {
					dst.data = newArray(d.elemType, 0)
				} else {
					dst.len = 0
				}
				cursor++
				return cursor, nil
			}
			idx := 0
			slice := d.newSlice((*sliceHeader)(p))
			srcLen := slice.len
			capacity := slice.cap
			data := slice.data
			for {
				if capacity <= idx {
					src := sliceHeader{data: data, len: idx, cap: capacity}
					capacity *= 2
					data = newArray(d.elemType, capacity)
					dst := sliceHeader{data: data, len: idx, cap: capacity}
					copySlice(d.elemType, dst, src)
				}
				ep := unsafe.Pointer(uintptr(data) + uintptr(idx)*d.size)
				// if srcLen is greater than idx, keep the original reference
				if srcLen <= idx {
					if d.isElemPointerType {
						**(**unsafe.Pointer)(unsafe.Pointer(&ep)) = nil // initialize elem pointer
					} else {
						// assign new element to the slice
						typedmemmove(d.elemType, ep, unsafe_New(d.elemType))
					}
				}
				c, err := d.valueDecoder.Decode(ctx, cursor, depth, ep)
				if err != nil {
					return 0, err
				}
				cursor = c
				cursor = skipWhiteSpace(buf, cursor)
				switch buf[cursor] {
				case ']':
					slice.cap = capacity
					slice.len = idx + 1
					slice.data = data
					dst := (*sliceHeader)(p)
					dst.len = idx + 1
					if dst.len > dst.cap {
						dst.data = newArray(d.elemType, dst.len)
						dst.cap = dst.len
					}
					copySlice(d.elemType, *dst, *slice)
					d.releaseSlice(slice)
					cursor++
					return cursor, nil
				case ',':
					idx++
				default:
					slice.cap = capacity
					slice.data = data
					d.releaseSlice(slice)
					return 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor)
				}
				cursor++
			}
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return 0, d.errNumber(cursor)
		default:
			return 0, errors.ErrUnexpectedEndOfJSON("slice", cursor)
		}
	}
}

// DecodePath extracts the raw bytes of the array elements selected by
// ctx.Option.Path, recursing into matched indexes and skipping the rest.
func (d *sliceDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	buf := ctx.Buf
	depth++
	if depth > maxDecodeNestingDepth {
		return nil, 0, errors.ErrExceededMaxDepth(buf[cursor], cursor)
	}

	ret := [][]byte{}
	for {
		switch buf[cursor] {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case 'n':
			if err := validateNull(buf, cursor); err != nil {
				return nil, 0, err
			}
			cursor += 4
			return [][]byte{nullbytes}, cursor, nil
		case '[':
			cursor++
			cursor = skipWhiteSpace(buf, cursor)
			if buf[cursor] == ']' {
				cursor++
				return ret, cursor, nil
			}
			idx := 0
			for {
				child, found, err := ctx.Option.Path.node.Index(idx)
				if err != nil {
					return nil, 0, err
				}
				if found {
					if child != nil {
						// Deeper path remains: descend with the child node.
						oldPath := ctx.Option.Path.node
						ctx.Option.Path.node = child
						paths, c, err := d.valueDecoder.DecodePath(ctx, cursor, depth)
						if err != nil {
							return nil, 0, err
						}
						ctx.Option.Path.node = oldPath
						ret = append(ret, paths...)
						cursor = c
					} else {
						// Leaf match: capture the raw element bytes.
						start := cursor
						end, err := skipValue(buf, cursor, depth)
						if err != nil {
							return nil, 0, err
						}
						ret = append(ret, buf[start:end])
						cursor = end
					}
				} else {
					c, err := skipValue(buf, cursor, depth)
					if err != nil {
						return nil, 0, err
					}
					cursor = c
				}
				cursor = skipWhiteSpace(buf, cursor)
				switch buf[cursor] {
				case ']':
					cursor++
					return ret, cursor, nil
				case ',':
					idx++
				default:
					return nil, 0, errors.ErrInvalidCharacter(buf[cursor], "slice", cursor)
				}
				cursor++
			}
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return nil, 0, d.errNumber(cursor)
		default:
			return nil, 0, errors.ErrUnexpectedEndOfJSON("slice", cursor)
		}
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go
//go:build !race // +build !race package decoder import ( "unsafe" "github.com/goccy/go-json/internal/runtime" ) func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) } index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift if dec := cachedDecoder[index]; dec != nil { return dec, nil } dec, err := compileHead(typ, map[uintptr]Decoder{}) if err != nil { return nil, err } cachedDecoder[index] = dec return dec, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/float.go
vendor/github.com/goccy/go-json/internal/decoder/float.go
package decoder

import (
	"strconv"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
)

// floatDecoder decodes a JSON number into a float destination via op.
type floatDecoder struct {
	op         func(unsafe.Pointer, float64)
	structName string
	fieldName  string
}

func newFloatDecoder(structName, fieldName string, op func(unsafe.Pointer, float64)) *floatDecoder {
	return &floatDecoder{op: op, structName: structName, fieldName: fieldName}
}

var (
	// floatTable marks bytes that can appear inside a number literal.
	floatTable = [256]bool{
		'0': true, '1': true, '2': true, '3': true, '4': true,
		'5': true, '6': true, '7': true, '8': true, '9': true,
		'.': true, 'e': true, 'E': true, '+': true, '-': true,
	}

	// validEndNumberChar marks bytes that may legally follow a number.
	validEndNumberChar = [256]bool{
		nul: true, ' ': true, '\t': true, '\r': true, '\n': true,
		',': true, ':': true, '}': true, ']': true,
	}
)

// floatBytes consumes a number literal from the stream starting at the
// current cursor and returns its raw bytes.
func floatBytes(s *Stream) []byte {
	start := s.cursor
	for {
		s.cursor++
		if floatTable[s.char()] {
			continue
		} else if s.char() == nul {
			if s.read() {
				s.cursor-- // for retry current character
				continue
			}
		}
		break
	}
	return s.buf[start:s.cursor]
}

// decodeStreamByte skips whitespace and returns the raw bytes of the next
// number; nil bytes (with nil error) mean JSON null.
func (d *floatDecoder) decodeStreamByte(s *Stream) ([]byte, error) {
	for {
		switch s.char() {
		case ' ', '\n', '\t', '\r':
			s.cursor++
			continue
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return floatBytes(s), nil
		case 'n':
			if err := nullBytes(s); err != nil {
				return nil, err
			}
			return nil, nil
		case nul:
			if s.read() {
				continue
			}
			goto ERROR
		default:
			goto ERROR
		}
	}
ERROR:
	return nil, errors.ErrUnexpectedEndOfJSON("float", s.totalOffset())
}

// decodeByte is the buffer-based counterpart of decodeStreamByte; it also
// returns the cursor after the consumed token.
func (d *floatDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) {
	for {
		switch buf[cursor] {
		case ' ', '\n', '\t', '\r':
			cursor++
			continue
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			start := cursor
			cursor++
			for floatTable[buf[cursor]] {
				cursor++
			}
			num := buf[start:cursor]
			return num, cursor, nil
		case 'n':
			if err := validateNull(buf, cursor); err != nil {
				return nil, 0, err
			}
			cursor += 4
			return nil, cursor, nil
		default:
			return nil, 0, errors.ErrUnexpectedEndOfJSON("float", cursor)
		}
	}
}

// DecodeStream parses the next number and stores it through op; JSON null
// leaves the destination untouched.
func (d *floatDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	bytes, err := d.decodeStreamByte(s)
	if err != nil {
		return err
	}
	if bytes == nil {
		return nil
	}
	str := *(*string)(unsafe.Pointer(&bytes))
	f64, err := strconv.ParseFloat(str, 64)
	if err != nil {
		return errors.ErrSyntax(err.Error(), s.totalOffset())
	}
	d.op(p, f64)
	return nil
}

// Decode parses the number at cursor, validates its terminator, stores the
// value through op, and returns the new cursor.
func (d *floatDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	buf := ctx.Buf
	bytes, c, err := d.decodeByte(buf, cursor)
	if err != nil {
		return 0, err
	}
	if bytes == nil {
		return c, nil
	}
	cursor = c
	if !validEndNumberChar[buf[cursor]] {
		return 0, errors.ErrUnexpectedEndOfJSON("float", cursor)
	}
	s := *(*string)(unsafe.Pointer(&bytes))
	f64, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, errors.ErrSyntax(err.Error(), cursor)
	}
	d.op(p, f64)
	return cursor, nil
}

// DecodePath returns the raw number bytes (or nullbytes for null) without
// converting them.
func (d *floatDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	buf := ctx.Buf
	bytes, c, err := d.decodeByte(buf, cursor)
	if err != nil {
		return nil, 0, err
	}
	if bytes == nil {
		return [][]byte{nullbytes}, c, nil
	}
	return [][]byte{bytes}, c, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/bytes.go
vendor/github.com/goccy/go-json/internal/decoder/bytes.go
package decoder

import (
	"encoding/base64"
	"fmt"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// bytesDecoder decodes a []byte destination: either a base64 string or,
// as a fallback, a JSON array of byte values.
type bytesDecoder struct {
	typ           *runtime.Type
	sliceDecoder  Decoder        // handles the `[1,2,3]` array form
	stringDecoder *stringDecoder // handles the base64 string form
	structName    string
	fieldName     string
}

// byteUnmarshalerSliceDecoder builds the element decoder used when a []byte
// value arrives as a JSON array, honoring custom unmarshalers on the
// element type before falling back to plain uint8 decoding.
func byteUnmarshalerSliceDecoder(typ *runtime.Type, structName string, fieldName string) Decoder {
	var unmarshalDecoder Decoder
	switch {
	case runtime.PtrTo(typ).Implements(unmarshalJSONType):
		unmarshalDecoder = newUnmarshalJSONDecoder(runtime.PtrTo(typ), structName, fieldName)
	case runtime.PtrTo(typ).Implements(unmarshalTextType):
		unmarshalDecoder = newUnmarshalTextDecoder(runtime.PtrTo(typ), structName, fieldName)
	default:
		unmarshalDecoder, _ = compileUint8(typ, structName, fieldName)
	}
	return newSliceDecoder(unmarshalDecoder, typ, 1, structName, fieldName)
}

func newBytesDecoder(typ *runtime.Type, structName string, fieldName string) *bytesDecoder {
	return &bytesDecoder{
		typ:           typ,
		sliceDecoder:  byteUnmarshalerSliceDecoder(typ, structName, fieldName),
		stringDecoder: newStringDecoder(structName, fieldName),
		structName:    structName,
		fieldName:     fieldName,
	}
}

// DecodeStream reads a base64 string (or array form) from the stream and
// stores the decoded bytes into p. nil bytes mean the value was handled by
// the slice decoder or was null.
func (d *bytesDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error {
	bytes, err := d.decodeStreamBinary(s, depth, p)
	if err != nil {
		return err
	}
	if bytes == nil {
		s.reset()
		return nil
	}
	decodedLen := base64.StdEncoding.DecodedLen(len(bytes))
	buf := make([]byte, decodedLen)
	n, err := base64.StdEncoding.Decode(buf, bytes)
	if err != nil {
		return err
	}
	*(*[]byte)(p) = buf[:n]
	s.reset()
	return nil
}

// Decode is the buffer-based counterpart of DecodeStream.
func (d *bytesDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) {
	bytes, c, err := d.decodeBinary(ctx, cursor, depth, p)
	if err != nil {
		return 0, err
	}
	if bytes == nil {
		return c, nil
	}
	cursor = c
	decodedLen := base64.StdEncoding.DecodedLen(len(bytes))
	b := make([]byte, decodedLen)
	n, err := base64.StdEncoding.Decode(b, bytes)
	if err != nil {
		return 0, err
	}
	*(*[]byte)(p) = b[:n]
	return cursor, nil
}

// DecodePath is unsupported for []byte destinations.
func (d *bytesDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) {
	return nil, 0, fmt.Errorf("json: []byte decoder does not support decode path")
}

// decodeStreamBinary returns the raw base64 string bytes, or decodes an
// array form directly into p (returning nil bytes in that case).
func (d *bytesDecoder) decodeStreamBinary(s *Stream, depth int64, p unsafe.Pointer) ([]byte, error) {
	c := s.skipWhiteSpace()
	if c == '[' {
		if d.sliceDecoder == nil {
			return nil, &errors.UnmarshalTypeError{
				Type:   runtime.RType2Type(d.typ),
				Offset: s.totalOffset(),
			}
		}
		err := d.sliceDecoder.DecodeStream(s, depth, p)
		return nil, err
	}
	return d.stringDecoder.decodeStreamByte(s)
}

// decodeBinary is the buffer-based counterpart of decodeStreamBinary.
func (d *bytesDecoder) decodeBinary(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) ([]byte, int64, error) {
	buf := ctx.Buf
	cursor = skipWhiteSpace(buf, cursor)
	if buf[cursor] == '[' {
		if d.sliceDecoder == nil {
			return nil, 0, &errors.UnmarshalTypeError{
				Type:   runtime.RType2Type(d.typ),
				Offset: cursor,
			}
		}
		c, err := d.sliceDecoder.Decode(ctx, cursor, depth, p)
		if err != nil {
			return nil, 0, err
		}
		return nil, c, nil
	}
	return d.stringDecoder.decodeByte(buf, cursor)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/type.go
vendor/github.com/goccy/go-json/internal/decoder/type.go
package decoder

import (
	"context"
	"encoding"
	"encoding/json"
	"reflect"
	"unsafe"
)

// Decoder is the compiled per-type decoding strategy. Decode reads from an
// in-memory buffer, DecodePath extracts raw bytes selected by a field path,
// and DecodeStream reads incrementally from a Stream.
type Decoder interface {
	Decode(*RuntimeContext, int64, int64, unsafe.Pointer) (int64, error)
	DecodePath(*RuntimeContext, int64, int64) ([][]byte, int64, error)
	DecodeStream(*Stream, int64, unsafe.Pointer) error
}

const (
	// nul is the NUL byte used as the end-of-buffer sentinel.
	nul = '\000'
	// maxDecodeNestingDepth bounds recursion to avoid stack exhaustion on
	// deeply nested input.
	maxDecodeNestingDepth = 10000
)

// unmarshalerContext is the context-aware variant of json.Unmarshaler
// supported by this package.
type unmarshalerContext interface {
	UnmarshalJSON(context.Context, []byte) error
}

var (
	unmarshalJSONType        = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
	unmarshalJSONContextType = reflect.TypeOf((*unmarshalerContext)(nil)).Elem()
	unmarshalTextType        = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/number.go
vendor/github.com/goccy/go-json/internal/decoder/number.go
package decoder import ( "encoding/json" "strconv" "unsafe" "github.com/goccy/go-json/internal/errors" ) type numberDecoder struct { stringDecoder *stringDecoder op func(unsafe.Pointer, json.Number) structName string fieldName string } func newNumberDecoder(structName, fieldName string, op func(unsafe.Pointer, json.Number)) *numberDecoder { return &numberDecoder{ stringDecoder: newStringDecoder(structName, fieldName), op: op, structName: structName, fieldName: fieldName, } } func (d *numberDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { bytes, err := d.decodeStreamByte(s) if err != nil { return err } if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { return errors.ErrSyntax(err.Error(), s.totalOffset()) } d.op(p, json.Number(string(bytes))) s.reset() return nil } func (d *numberDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { bytes, c, err := d.decodeByte(ctx.Buf, cursor) if err != nil { return 0, err } if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&bytes)), 64); err != nil { return 0, errors.ErrSyntax(err.Error(), c) } cursor = c s := *(*string)(unsafe.Pointer(&bytes)) d.op(p, json.Number(s)) return cursor, nil } func (d *numberDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { bytes, c, err := d.decodeByte(ctx.Buf, cursor) if err != nil { return nil, 0, err } if bytes == nil { return [][]byte{nullbytes}, c, nil } return [][]byte{bytes}, c, nil } func (d *numberDecoder) decodeStreamByte(s *Stream) ([]byte, error) { start := s.cursor for { switch s.char() { case ' ', '\n', '\t', '\r': s.cursor++ continue case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return floatBytes(s), nil case 'n': if err := nullBytes(s); err != nil { return nil, err } return nil, nil case '"': return d.stringDecoder.decodeStreamByte(s) case nul: if s.read() { continue } goto ERROR default: goto ERROR } } ERROR: if s.cursor == 
start { return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) } return nil, errors.ErrUnexpectedEndOfJSON("json.Number", s.totalOffset()) } func (d *numberDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { for { switch buf[cursor] { case ' ', '\n', '\t', '\r': cursor++ continue case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': start := cursor cursor++ for floatTable[buf[cursor]] { cursor++ } num := buf[start:cursor] return num, cursor, nil case 'n': if err := validateNull(buf, cursor); err != nil { return nil, 0, err } cursor += 4 return nil, cursor, nil case '"': return d.stringDecoder.decodeByte(buf, cursor) default: return nil, 0, errors.ErrUnexpectedEndOfJSON("json.Number", cursor) } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/string.go
vendor/github.com/goccy/go-json/internal/decoder/string.go
package decoder import ( "bytes" "fmt" "reflect" "unicode" "unicode/utf16" "unicode/utf8" "unsafe" "github.com/goccy/go-json/internal/errors" ) type stringDecoder struct { structName string fieldName string } func newStringDecoder(structName, fieldName string) *stringDecoder { return &stringDecoder{ structName: structName, fieldName: fieldName, } } func (d *stringDecoder) errUnmarshalType(typeName string, offset int64) *errors.UnmarshalTypeError { return &errors.UnmarshalTypeError{ Value: typeName, Type: reflect.TypeOf(""), Offset: offset, Struct: d.structName, Field: d.fieldName, } } func (d *stringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { bytes, err := d.decodeStreamByte(s) if err != nil { return err } if bytes == nil { return nil } **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) s.reset() return nil } func (d *stringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { bytes, c, err := d.decodeByte(ctx.Buf, cursor) if err != nil { return 0, err } if bytes == nil { return c, nil } cursor = c **(**string)(unsafe.Pointer(&p)) = *(*string)(unsafe.Pointer(&bytes)) return cursor, nil } func (d *stringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { bytes, c, err := d.decodeByte(ctx.Buf, cursor) if err != nil { return nil, 0, err } if bytes == nil { return [][]byte{nullbytes}, c, nil } return [][]byte{bytes}, c, nil } var ( hexToInt = [256]int{ '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, } ) func unicodeToRune(code []byte) rune { var r rune for i := 0; i < len(code); i++ { r = r*16 + rune(hexToInt[code[i]]) } return r } func readAtLeast(s *Stream, n int64, p *unsafe.Pointer) bool { for s.cursor+n >= s.length { if !s.read() { return false } *p = s.bufptr() } return true } func decodeUnicodeRune(s 
*Stream, p unsafe.Pointer) (rune, int64, unsafe.Pointer, error) { const defaultOffset = 5 const surrogateOffset = 11 if !readAtLeast(s, defaultOffset, &p) { return rune(0), 0, nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) } r := unicodeToRune(s.buf[s.cursor+1 : s.cursor+defaultOffset]) if utf16.IsSurrogate(r) { if !readAtLeast(s, surrogateOffset, &p) { return unicode.ReplacementChar, defaultOffset, p, nil } if s.buf[s.cursor+defaultOffset] != '\\' || s.buf[s.cursor+defaultOffset+1] != 'u' { return unicode.ReplacementChar, defaultOffset, p, nil } r2 := unicodeToRune(s.buf[s.cursor+defaultOffset+2 : s.cursor+surrogateOffset]) if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { return r, surrogateOffset, p, nil } } return r, defaultOffset, p, nil } func decodeUnicode(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { const backSlashAndULen = 2 // length of \u r, offset, pp, err := decodeUnicodeRune(s, p) if err != nil { return nil, err } unicode := []byte(string(r)) unicodeLen := int64(len(unicode)) s.buf = append(append(s.buf[:s.cursor-1], unicode...), s.buf[s.cursor+offset:]...) 
unicodeOrgLen := offset - 1 s.length = s.length - (backSlashAndULen + (unicodeOrgLen - unicodeLen)) s.cursor = s.cursor - backSlashAndULen + unicodeLen return pp, nil } func decodeEscapeString(s *Stream, p unsafe.Pointer) (unsafe.Pointer, error) { s.cursor++ RETRY: switch s.buf[s.cursor] { case '"': s.buf[s.cursor] = '"' case '\\': s.buf[s.cursor] = '\\' case '/': s.buf[s.cursor] = '/' case 'b': s.buf[s.cursor] = '\b' case 'f': s.buf[s.cursor] = '\f' case 'n': s.buf[s.cursor] = '\n' case 'r': s.buf[s.cursor] = '\r' case 't': s.buf[s.cursor] = '\t' case 'u': return decodeUnicode(s, p) case nul: if !s.read() { return nil, errors.ErrInvalidCharacter(s.char(), "escaped string", s.totalOffset()) } p = s.bufptr() goto RETRY default: return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } s.buf = append(s.buf[:s.cursor-1], s.buf[s.cursor:]...) s.length-- s.cursor-- p = s.bufptr() return p, nil } var ( runeErrBytes = []byte(string(utf8.RuneError)) runeErrBytesLen = int64(len(runeErrBytes)) ) func stringBytes(s *Stream) ([]byte, error) { _, cursor, p := s.stat() cursor++ // skip double quote char start := cursor for { switch char(p, cursor) { case '\\': s.cursor = cursor pp, err := decodeEscapeString(s, p) if err != nil { return nil, err } p = pp cursor = s.cursor case '"': literal := s.buf[start:cursor] cursor++ s.cursor = cursor return literal, nil case // 0x00 is nul, 0x5c is '\\', 0x22 is '"' . 
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, // 0x00-0x0F 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, // 0x10-0x1F 0x20, 0x21 /*0x22,*/, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, // 0x20-0x2F 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, // 0x30-0x3F 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, // 0x40-0x4F 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B /*0x5C,*/, 0x5D, 0x5E, 0x5F, // 0x50-0x5F 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, // 0x60-0x6F 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F: // 0x70-0x7F // character is ASCII. skip to next char case 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, // 0x80-0x8F 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, // 0x90-0x9F 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, // 0xA0-0xAF 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, // 0xB0-0xBF 0xC0, 0xC1, // 0xC0-0xC1 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF: // 0xF5-0xFE // character is invalid s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) 
_, _, p = s.stat() cursor += runeErrBytesLen s.length += runeErrBytesLen continue case nul: s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } goto ERROR case 0xEF: // RuneError is {0xEF, 0xBF, 0xBD} if s.buf[cursor+1] == 0xBF && s.buf[cursor+2] == 0xBD { // found RuneError: skip cursor += 2 break } fallthrough default: // multi bytes character if !utf8.FullRune(s.buf[cursor : len(s.buf)-1]) { s.cursor = cursor if s.read() { _, cursor, p = s.stat() continue } goto ERROR } r, size := utf8.DecodeRune(s.buf[cursor:]) if r == utf8.RuneError { s.buf = append(append(append([]byte{}, s.buf[:cursor]...), runeErrBytes...), s.buf[cursor+1:]...) cursor += runeErrBytesLen s.length += runeErrBytesLen _, _, p = s.stat() } else { cursor += int64(size) } continue } cursor++ } ERROR: return nil, errors.ErrUnexpectedEndOfJSON("string", s.totalOffset()) } func (d *stringDecoder) decodeStreamByte(s *Stream) ([]byte, error) { for { switch s.char() { case ' ', '\n', '\t', '\r': s.cursor++ continue case '[': return nil, d.errUnmarshalType("array", s.totalOffset()) case '{': return nil, d.errUnmarshalType("object", s.totalOffset()) case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return nil, d.errUnmarshalType("number", s.totalOffset()) case '"': return stringBytes(s) case 'n': if err := nullBytes(s); err != nil { return nil, err } return nil, nil case nul: if s.read() { continue } } break } return nil, errors.ErrInvalidBeginningOfValue(s.char(), s.totalOffset()) } func (d *stringDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { for { switch buf[cursor] { case ' ', '\n', '\t', '\r': cursor++ case '[': return nil, 0, d.errUnmarshalType("array", cursor) case '{': return nil, 0, d.errUnmarshalType("object", cursor) case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return nil, 0, d.errUnmarshalType("number", cursor) case '"': cursor++ start := cursor b := (*sliceHeader)(unsafe.Pointer(&buf)).data escaped := 0 for { switch char(b, 
cursor) { case '\\': escaped++ cursor++ switch char(b, cursor) { case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': cursor++ case 'u': buflen := int64(len(buf)) if cursor+5 >= buflen { return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) } for i := int64(1); i <= 4; i++ { c := char(b, cursor+i) if !(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')) { return nil, 0, errors.ErrSyntax(fmt.Sprintf("json: invalid character %c in \\u hexadecimal character escape", c), cursor+i) } } cursor += 5 default: return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) } continue case '"': literal := buf[start:cursor] if escaped > 0 { literal = literal[:unescapeString(literal)] } cursor++ return literal, cursor, nil case nul: return nil, 0, errors.ErrUnexpectedEndOfJSON("string", cursor) } cursor++ } case 'n': if err := validateNull(buf, cursor); err != nil { return nil, 0, err } cursor += 4 return nil, cursor, nil default: return nil, 0, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) } } } var unescapeMap = [256]byte{ '"': '"', '\\': '\\', '/': '/', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', } func unsafeAdd(ptr unsafe.Pointer, offset int) unsafe.Pointer { return unsafe.Pointer(uintptr(ptr) + uintptr(offset)) } func unescapeString(buf []byte) int { p := (*sliceHeader)(unsafe.Pointer(&buf)).data end := unsafeAdd(p, len(buf)) src := unsafeAdd(p, bytes.IndexByte(buf, '\\')) dst := src for src != end { c := char(src, 0) if c == '\\' { escapeChar := char(src, 1) if escapeChar != 'u' { *(*byte)(dst) = unescapeMap[escapeChar] src = unsafeAdd(src, 2) dst = unsafeAdd(dst, 1) } else { v1 := hexToInt[char(src, 2)] v2 := hexToInt[char(src, 3)] v3 := hexToInt[char(src, 4)] v4 := hexToInt[char(src, 5)] code := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) if code >= 0xd800 && code < 0xdc00 && uintptr(unsafeAdd(src, 11)) < uintptr(end) { if char(src, 6) == '\\' && char(src, 7) == 'u' { v1 := hexToInt[char(src, 8)] v2 := 
hexToInt[char(src, 9)] v3 := hexToInt[char(src, 10)] v4 := hexToInt[char(src, 11)] lo := rune((v1 << 12) | (v2 << 8) | (v3 << 4) | v4) if lo >= 0xdc00 && lo < 0xe000 { code = (code-0xd800)<<10 | (lo - 0xdc00) + 0x10000 src = unsafeAdd(src, 6) } } } var b [utf8.UTFMax]byte n := utf8.EncodeRune(b[:], code) switch n { case 4: *(*byte)(unsafeAdd(dst, 3)) = b[3] fallthrough case 3: *(*byte)(unsafeAdd(dst, 2)) = b[2] fallthrough case 2: *(*byte)(unsafeAdd(dst, 1)) = b[1] fallthrough case 1: *(*byte)(unsafeAdd(dst, 0)) = b[0] } src = unsafeAdd(src, 6) dst = unsafeAdd(dst, n) } } else { *(*byte)(dst) = c src = unsafeAdd(src, 1) dst = unsafeAdd(dst, 1) } } return int(uintptr(dst) - uintptr(p)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/int.go
vendor/github.com/goccy/go-json/internal/decoder/int.go
package decoder import ( "fmt" "reflect" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type intDecoder struct { typ *runtime.Type kind reflect.Kind op func(unsafe.Pointer, int64) structName string fieldName string } func newIntDecoder(typ *runtime.Type, structName, fieldName string, op func(unsafe.Pointer, int64)) *intDecoder { return &intDecoder{ typ: typ, kind: typ.Kind(), op: op, structName: structName, fieldName: fieldName, } } func (d *intDecoder) typeError(buf []byte, offset int64) *errors.UnmarshalTypeError { return &errors.UnmarshalTypeError{ Value: fmt.Sprintf("number %s", string(buf)), Type: runtime.RType2Type(d.typ), Struct: d.structName, Field: d.fieldName, Offset: offset, } } var ( pow10i64 = [...]int64{ 1e00, 1e01, 1e02, 1e03, 1e04, 1e05, 1e06, 1e07, 1e08, 1e09, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, } pow10i64Len = len(pow10i64) ) func (d *intDecoder) parseInt(b []byte) (int64, error) { isNegative := false if b[0] == '-' { b = b[1:] isNegative = true } maxDigit := len(b) if maxDigit > pow10i64Len { return 0, fmt.Errorf("invalid length of number") } sum := int64(0) for i := 0; i < maxDigit; i++ { c := int64(b[i]) - 48 digitValue := pow10i64[maxDigit-i-1] sum += c * digitValue } if isNegative { return -1 * sum, nil } return sum, nil } var ( numTable = [256]bool{ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, '8': true, '9': true, } ) var ( numZeroBuf = []byte{'0'} ) func (d *intDecoder) decodeStreamByte(s *Stream) ([]byte, error) { for { switch s.char() { case ' ', '\n', '\t', '\r': s.cursor++ continue case '-': start := s.cursor for { s.cursor++ if numTable[s.char()] { continue } else if s.char() == nul { if s.read() { s.cursor-- // for retry current character continue } } break } num := s.buf[start:s.cursor] if len(num) < 2 { goto ERROR } return num, nil case '0': s.cursor++ return numZeroBuf, nil case '1', '2', '3', '4', '5', '6', '7', '8', 
'9': start := s.cursor for { s.cursor++ if numTable[s.char()] { continue } else if s.char() == nul { if s.read() { s.cursor-- // for retry current character continue } } break } num := s.buf[start:s.cursor] return num, nil case 'n': if err := nullBytes(s); err != nil { return nil, err } return nil, nil case nul: if s.read() { continue } goto ERROR default: return nil, d.typeError([]byte{s.char()}, s.totalOffset()) } } ERROR: return nil, errors.ErrUnexpectedEndOfJSON("number(integer)", s.totalOffset()) } func (d *intDecoder) decodeByte(buf []byte, cursor int64) ([]byte, int64, error) { b := (*sliceHeader)(unsafe.Pointer(&buf)).data for { switch char(b, cursor) { case ' ', '\n', '\t', '\r': cursor++ continue case '0': cursor++ return numZeroBuf, cursor, nil case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': start := cursor cursor++ for numTable[char(b, cursor)] { cursor++ } num := buf[start:cursor] return num, cursor, nil case 'n': if err := validateNull(buf, cursor); err != nil { return nil, 0, err } cursor += 4 return nil, cursor, nil default: return nil, 0, d.typeError([]byte{char(b, cursor)}, cursor) } } } func (d *intDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { bytes, err := d.decodeStreamByte(s) if err != nil { return err } if bytes == nil { return nil } i64, err := d.parseInt(bytes) if err != nil { return d.typeError(bytes, s.totalOffset()) } switch d.kind { case reflect.Int8: if i64 < -1*(1<<7) || (1<<7) <= i64 { return d.typeError(bytes, s.totalOffset()) } case reflect.Int16: if i64 < -1*(1<<15) || (1<<15) <= i64 { return d.typeError(bytes, s.totalOffset()) } case reflect.Int32: if i64 < -1*(1<<31) || (1<<31) <= i64 { return d.typeError(bytes, s.totalOffset()) } } d.op(p, i64) s.reset() return nil } func (d *intDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { bytes, c, err := d.decodeByte(ctx.Buf, cursor) if err != nil { return 0, err } if bytes == nil { return c, nil } cursor = c 
i64, err := d.parseInt(bytes) if err != nil { return 0, d.typeError(bytes, cursor) } switch d.kind { case reflect.Int8: if i64 < -1*(1<<7) || (1<<7) <= i64 { return 0, d.typeError(bytes, cursor) } case reflect.Int16: if i64 < -1*(1<<15) || (1<<15) <= i64 { return 0, d.typeError(bytes, cursor) } case reflect.Int32: if i64 < -1*(1<<31) || (1<<31) <= i64 { return 0, d.typeError(bytes, cursor) } } d.op(p, i64) return cursor, nil } func (d *intDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: int decoder does not support decode path") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/context.go
vendor/github.com/goccy/go-json/internal/decoder/context.go
package decoder import ( "sync" "unsafe" "github.com/goccy/go-json/internal/errors" ) type RuntimeContext struct { Buf []byte Option *Option } var ( runtimeContextPool = sync.Pool{ New: func() interface{} { return &RuntimeContext{ Option: &Option{}, } }, } ) func TakeRuntimeContext() *RuntimeContext { return runtimeContextPool.Get().(*RuntimeContext) } func ReleaseRuntimeContext(ctx *RuntimeContext) { runtimeContextPool.Put(ctx) } var ( isWhiteSpace = [256]bool{} ) func init() { isWhiteSpace[' '] = true isWhiteSpace['\n'] = true isWhiteSpace['\t'] = true isWhiteSpace['\r'] = true } func char(ptr unsafe.Pointer, offset int64) byte { return *(*byte)(unsafe.Pointer(uintptr(ptr) + uintptr(offset))) } func skipWhiteSpace(buf []byte, cursor int64) int64 { for isWhiteSpace[buf[cursor]] { cursor++ } return cursor } func skipObject(buf []byte, cursor, depth int64) (int64, error) { braceCount := 1 for { switch buf[cursor] { case '{': braceCount++ depth++ if depth > maxDecodeNestingDepth { return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) } case '}': depth-- braceCount-- if braceCount == 0 { return cursor + 1, nil } case '[': depth++ if depth > maxDecodeNestingDepth { return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) } case ']': depth-- case '"': for { cursor++ switch buf[cursor] { case '\\': cursor++ if buf[cursor] == nul { return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } case '"': goto SWITCH_OUT case nul: return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } } case nul: return 0, errors.ErrUnexpectedEndOfJSON("object of object", cursor) } SWITCH_OUT: cursor++ } } func skipArray(buf []byte, cursor, depth int64) (int64, error) { bracketCount := 1 for { switch buf[cursor] { case '[': bracketCount++ depth++ if depth > maxDecodeNestingDepth { return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) } case ']': bracketCount-- depth-- if bracketCount == 0 { return cursor + 1, nil } case '{': depth++ if depth > 
maxDecodeNestingDepth { return 0, errors.ErrExceededMaxDepth(buf[cursor], cursor) } case '}': depth-- case '"': for { cursor++ switch buf[cursor] { case '\\': cursor++ if buf[cursor] == nul { return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } case '"': goto SWITCH_OUT case nul: return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } } case nul: return 0, errors.ErrUnexpectedEndOfJSON("array of object", cursor) } SWITCH_OUT: cursor++ } } func skipValue(buf []byte, cursor, depth int64) (int64, error) { for { switch buf[cursor] { case ' ', '\t', '\n', '\r': cursor++ continue case '{': return skipObject(buf, cursor+1, depth+1) case '[': return skipArray(buf, cursor+1, depth+1) case '"': for { cursor++ switch buf[cursor] { case '\\': cursor++ if buf[cursor] == nul { return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } case '"': return cursor + 1, nil case nul: return 0, errors.ErrUnexpectedEndOfJSON("string of object", cursor) } } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': for { cursor++ if floatTable[buf[cursor]] { continue } break } return cursor, nil case 't': if err := validateTrue(buf, cursor); err != nil { return 0, err } cursor += 4 return cursor, nil case 'f': if err := validateFalse(buf, cursor); err != nil { return 0, err } cursor += 5 return cursor, nil case 'n': if err := validateNull(buf, cursor); err != nil { return 0, err } cursor += 4 return cursor, nil default: return cursor, errors.ErrUnexpectedEndOfJSON("null", cursor) } } } func validateTrue(buf []byte, cursor int64) error { if cursor+3 >= int64(len(buf)) { return errors.ErrUnexpectedEndOfJSON("true", cursor) } if buf[cursor+1] != 'r' { return errors.ErrInvalidCharacter(buf[cursor+1], "true", cursor) } if buf[cursor+2] != 'u' { return errors.ErrInvalidCharacter(buf[cursor+2], "true", cursor) } if buf[cursor+3] != 'e' { return errors.ErrInvalidCharacter(buf[cursor+3], "true", cursor) } return nil } func validateFalse(buf []byte, cursor 
int64) error { if cursor+4 >= int64(len(buf)) { return errors.ErrUnexpectedEndOfJSON("false", cursor) } if buf[cursor+1] != 'a' { return errors.ErrInvalidCharacter(buf[cursor+1], "false", cursor) } if buf[cursor+2] != 'l' { return errors.ErrInvalidCharacter(buf[cursor+2], "false", cursor) } if buf[cursor+3] != 's' { return errors.ErrInvalidCharacter(buf[cursor+3], "false", cursor) } if buf[cursor+4] != 'e' { return errors.ErrInvalidCharacter(buf[cursor+4], "false", cursor) } return nil } func validateNull(buf []byte, cursor int64) error { if cursor+3 >= int64(len(buf)) { return errors.ErrUnexpectedEndOfJSON("null", cursor) } if buf[cursor+1] != 'u' { return errors.ErrInvalidCharacter(buf[cursor+1], "null", cursor) } if buf[cursor+2] != 'l' { return errors.ErrInvalidCharacter(buf[cursor+2], "null", cursor) } if buf[cursor+3] != 'l' { return errors.ErrInvalidCharacter(buf[cursor+3], "null", cursor) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/assign.go
vendor/github.com/goccy/go-json/internal/decoder/assign.go
package decoder import ( "fmt" "reflect" "strconv" ) var ( nilValue = reflect.ValueOf(nil) ) func AssignValue(src, dst reflect.Value) error { if dst.Type().Kind() != reflect.Ptr { return fmt.Errorf("invalid dst type. required pointer type: %T", dst.Type()) } casted, err := castValue(dst.Elem().Type(), src) if err != nil { return err } dst.Elem().Set(casted) return nil } func castValue(t reflect.Type, v reflect.Value) (reflect.Value, error) { switch t.Kind() { case reflect.Int: vv, err := castInt(v) if err != nil { return nilValue, err } return reflect.ValueOf(int(vv.Int())), nil case reflect.Int8: vv, err := castInt(v) if err != nil { return nilValue, err } return reflect.ValueOf(int8(vv.Int())), nil case reflect.Int16: vv, err := castInt(v) if err != nil { return nilValue, err } return reflect.ValueOf(int16(vv.Int())), nil case reflect.Int32: vv, err := castInt(v) if err != nil { return nilValue, err } return reflect.ValueOf(int32(vv.Int())), nil case reflect.Int64: return castInt(v) case reflect.Uint: vv, err := castUint(v) if err != nil { return nilValue, err } return reflect.ValueOf(uint(vv.Uint())), nil case reflect.Uint8: vv, err := castUint(v) if err != nil { return nilValue, err } return reflect.ValueOf(uint8(vv.Uint())), nil case reflect.Uint16: vv, err := castUint(v) if err != nil { return nilValue, err } return reflect.ValueOf(uint16(vv.Uint())), nil case reflect.Uint32: vv, err := castUint(v) if err != nil { return nilValue, err } return reflect.ValueOf(uint32(vv.Uint())), nil case reflect.Uint64: return castUint(v) case reflect.Uintptr: vv, err := castUint(v) if err != nil { return nilValue, err } return reflect.ValueOf(uintptr(vv.Uint())), nil case reflect.String: return castString(v) case reflect.Bool: return castBool(v) case reflect.Float32: vv, err := castFloat(v) if err != nil { return nilValue, err } return reflect.ValueOf(float32(vv.Float())), nil case reflect.Float64: return castFloat(v) case reflect.Array: return castArray(t, v) case 
reflect.Slice: return castSlice(t, v) case reflect.Map: return castMap(t, v) case reflect.Struct: return castStruct(t, v) } return v, nil } func castInt(v reflect.Value) (reflect.Value, error) { switch v.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v, nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return reflect.ValueOf(int64(v.Uint())), nil case reflect.String: i64, err := strconv.ParseInt(v.String(), 10, 64) if err != nil { return nilValue, err } return reflect.ValueOf(i64), nil case reflect.Bool: if v.Bool() { return reflect.ValueOf(int64(1)), nil } return reflect.ValueOf(int64(0)), nil case reflect.Float32, reflect.Float64: return reflect.ValueOf(int64(v.Float())), nil case reflect.Array: if v.Len() > 0 { return castInt(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to int64 from empty array") case reflect.Slice: if v.Len() > 0 { return castInt(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to int64 from empty slice") case reflect.Interface: return castInt(reflect.ValueOf(v.Interface())) case reflect.Map: return nilValue, fmt.Errorf("failed to cast to int64 from map") case reflect.Struct: return nilValue, fmt.Errorf("failed to cast to int64 from struct") case reflect.Ptr: return castInt(v.Elem()) } return nilValue, fmt.Errorf("failed to cast to int64 from %s", v.Type().Kind()) } func castUint(v reflect.Value) (reflect.Value, error) { switch v.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return reflect.ValueOf(uint64(v.Int())), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v, nil case reflect.String: u64, err := strconv.ParseUint(v.String(), 10, 64) if err != nil { return nilValue, err } return reflect.ValueOf(u64), nil case reflect.Bool: if v.Bool() { return reflect.ValueOf(uint64(1)), nil } return 
reflect.ValueOf(uint64(0)), nil case reflect.Float32, reflect.Float64: return reflect.ValueOf(uint64(v.Float())), nil case reflect.Array: if v.Len() > 0 { return castUint(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to uint64 from empty array") case reflect.Slice: if v.Len() > 0 { return castUint(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to uint64 from empty slice") case reflect.Interface: return castUint(reflect.ValueOf(v.Interface())) case reflect.Map: return nilValue, fmt.Errorf("failed to cast to uint64 from map") case reflect.Struct: return nilValue, fmt.Errorf("failed to cast to uint64 from struct") case reflect.Ptr: return castUint(v.Elem()) } return nilValue, fmt.Errorf("failed to cast to uint64 from %s", v.Type().Kind()) } func castString(v reflect.Value) (reflect.Value, error) { switch v.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return reflect.ValueOf(fmt.Sprint(v.Int())), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return reflect.ValueOf(fmt.Sprint(v.Uint())), nil case reflect.String: return v, nil case reflect.Bool: if v.Bool() { return reflect.ValueOf("true"), nil } return reflect.ValueOf("false"), nil case reflect.Float32, reflect.Float64: return reflect.ValueOf(fmt.Sprint(v.Float())), nil case reflect.Array: if v.Len() > 0 { return castString(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to string from empty array") case reflect.Slice: if v.Len() > 0 { return castString(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to string from empty slice") case reflect.Interface: return castString(reflect.ValueOf(v.Interface())) case reflect.Map: return nilValue, fmt.Errorf("failed to cast to string from map") case reflect.Struct: return nilValue, fmt.Errorf("failed to cast to string from struct") case reflect.Ptr: return castString(v.Elem()) } return nilValue, fmt.Errorf("failed to cast to string from 
%s", v.Type().Kind()) } func castBool(v reflect.Value) (reflect.Value, error) { switch v.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch v.Int() { case 0: return reflect.ValueOf(false), nil case 1: return reflect.ValueOf(true), nil } return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch v.Uint() { case 0: return reflect.ValueOf(false), nil case 1: return reflect.ValueOf(true), nil } return nilValue, fmt.Errorf("failed to cast to bool from %d", v.Uint()) case reflect.String: b, err := strconv.ParseBool(v.String()) if err != nil { return nilValue, err } return reflect.ValueOf(b), nil case reflect.Bool: return v, nil case reflect.Float32, reflect.Float64: switch v.Float() { case 0: return reflect.ValueOf(false), nil case 1: return reflect.ValueOf(true), nil } return nilValue, fmt.Errorf("failed to cast to bool from %f", v.Float()) case reflect.Array: if v.Len() > 0 { return castBool(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to string from empty array") case reflect.Slice: if v.Len() > 0 { return castBool(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to string from empty slice") case reflect.Interface: return castBool(reflect.ValueOf(v.Interface())) case reflect.Map: return nilValue, fmt.Errorf("failed to cast to string from map") case reflect.Struct: return nilValue, fmt.Errorf("failed to cast to string from struct") case reflect.Ptr: return castBool(v.Elem()) } return nilValue, fmt.Errorf("failed to cast to bool from %s", v.Type().Kind()) } func castFloat(v reflect.Value) (reflect.Value, error) { switch v.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return reflect.ValueOf(float64(v.Int())), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return 
reflect.ValueOf(float64(v.Uint())), nil case reflect.String: f64, err := strconv.ParseFloat(v.String(), 64) if err != nil { return nilValue, err } return reflect.ValueOf(f64), nil case reflect.Bool: if v.Bool() { return reflect.ValueOf(float64(1)), nil } return reflect.ValueOf(float64(0)), nil case reflect.Float32, reflect.Float64: return v, nil case reflect.Array: if v.Len() > 0 { return castFloat(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to float64 from empty array") case reflect.Slice: if v.Len() > 0 { return castFloat(v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to float64 from empty slice") case reflect.Interface: return castFloat(reflect.ValueOf(v.Interface())) case reflect.Map: return nilValue, fmt.Errorf("failed to cast to float64 from map") case reflect.Struct: return nilValue, fmt.Errorf("failed to cast to float64 from struct") case reflect.Ptr: return castFloat(v.Elem()) } return nilValue, fmt.Errorf("failed to cast to float64 from %s", v.Type().Kind()) } func castArray(t reflect.Type, v reflect.Value) (reflect.Value, error) { kind := v.Type().Kind() if kind == reflect.Interface { return castArray(t, reflect.ValueOf(v.Interface())) } if kind != reflect.Slice && kind != reflect.Array { return nilValue, fmt.Errorf("failed to cast to array from %s", kind) } if t.Elem() == v.Type().Elem() { return v, nil } if t.Len() != v.Len() { return nilValue, fmt.Errorf("failed to cast [%d]array from slice of %d length", t.Len(), v.Len()) } ret := reflect.New(t).Elem() for i := 0; i < v.Len(); i++ { vv, err := castValue(t.Elem(), v.Index(i)) if err != nil { return nilValue, err } ret.Index(i).Set(vv) } return ret, nil } func castSlice(t reflect.Type, v reflect.Value) (reflect.Value, error) { kind := v.Type().Kind() if kind == reflect.Interface { return castSlice(t, reflect.ValueOf(v.Interface())) } if kind != reflect.Slice && kind != reflect.Array { return nilValue, fmt.Errorf("failed to cast to slice from %s", kind) } if t.Elem() == 
v.Type().Elem() { return v, nil } ret := reflect.MakeSlice(t, v.Len(), v.Len()) for i := 0; i < v.Len(); i++ { vv, err := castValue(t.Elem(), v.Index(i)) if err != nil { return nilValue, err } ret.Index(i).Set(vv) } return ret, nil } func castMap(t reflect.Type, v reflect.Value) (reflect.Value, error) { ret := reflect.MakeMap(t) switch v.Type().Kind() { case reflect.Map: iter := v.MapRange() for iter.Next() { key, err := castValue(t.Key(), iter.Key()) if err != nil { return nilValue, err } value, err := castValue(t.Elem(), iter.Value()) if err != nil { return nilValue, err } ret.SetMapIndex(key, value) } return ret, nil case reflect.Interface: return castMap(t, reflect.ValueOf(v.Interface())) case reflect.Slice: if v.Len() > 0 { return castMap(t, v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to map from empty slice") } return nilValue, fmt.Errorf("failed to cast to map from %s", v.Type().Kind()) } func castStruct(t reflect.Type, v reflect.Value) (reflect.Value, error) { ret := reflect.New(t).Elem() switch v.Type().Kind() { case reflect.Map: iter := v.MapRange() for iter.Next() { key := iter.Key() k, err := castString(key) if err != nil { return nilValue, err } fieldName := k.String() field, ok := t.FieldByName(fieldName) if ok { value, err := castValue(field.Type, iter.Value()) if err != nil { return nilValue, err } ret.FieldByName(fieldName).Set(value) } } return ret, nil case reflect.Struct: for i := 0; i < v.Type().NumField(); i++ { name := v.Type().Field(i).Name ret.FieldByName(name).Set(v.FieldByName(name)) } return ret, nil case reflect.Interface: return castStruct(t, reflect.ValueOf(v.Interface())) case reflect.Slice: if v.Len() > 0 { return castStruct(t, v.Index(0)) } return nilValue, fmt.Errorf("failed to cast to struct from empty slice") default: return nilValue, fmt.Errorf("failed to cast to struct from %s", v.Type().Kind()) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
vendor/github.com/goccy/go-json/internal/decoder/unmarshal_text.go
package decoder import ( "bytes" "encoding" "fmt" "unicode" "unicode/utf16" "unicode/utf8" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type unmarshalTextDecoder struct { typ *runtime.Type structName string fieldName string } func newUnmarshalTextDecoder(typ *runtime.Type, structName, fieldName string) *unmarshalTextDecoder { return &unmarshalTextDecoder{ typ: typ, structName: structName, fieldName: fieldName, } } func (d *unmarshalTextDecoder) annotateError(cursor int64, err error) { switch e := err.(type) { case *errors.UnmarshalTypeError: e.Struct = d.structName e.Field = d.fieldName case *errors.SyntaxError: e.Offset = cursor } } var ( nullbytes = []byte(`null`) ) func (d *unmarshalTextDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { s.skipWhiteSpace() start := s.cursor if err := s.skipValue(depth); err != nil { return err } src := s.buf[start:s.cursor] if len(src) > 0 { switch src[0] { case '[': return &errors.UnmarshalTypeError{ Value: "array", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case '{': return &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return &errors.UnmarshalTypeError{ Value: "number", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case 'n': if bytes.Equal(src, nullbytes) { *(*unsafe.Pointer)(p) = nil return nil } } } dst := make([]byte, len(src)) copy(dst, src) if b, ok := unquoteBytes(dst); ok { dst = b } v := *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: d.typ, ptr: p, })) if err := v.(encoding.TextUnmarshaler).UnmarshalText(dst); err != nil { d.annotateError(s.cursor, err) return err } return nil } func (d *unmarshalTextDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { buf := ctx.Buf cursor = skipWhiteSpace(buf, cursor) start := cursor end, err := skipValue(buf, cursor, 
depth) if err != nil { return 0, err } src := buf[start:end] if len(src) > 0 { switch src[0] { case '[': return 0, &errors.UnmarshalTypeError{ Value: "array", Type: runtime.RType2Type(d.typ), Offset: start, } case '{': return 0, &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: start, } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return 0, &errors.UnmarshalTypeError{ Value: "number", Type: runtime.RType2Type(d.typ), Offset: start, } case 'n': if bytes.Equal(src, nullbytes) { *(*unsafe.Pointer)(p) = nil return end, nil } } } if s, ok := unquoteBytes(src); ok { src = s } v := *(*interface{})(unsafe.Pointer(&emptyInterface{ typ: d.typ, ptr: *(*unsafe.Pointer)(unsafe.Pointer(&p)), })) if err := v.(encoding.TextUnmarshaler).UnmarshalText(src); err != nil { d.annotateError(cursor, err) return 0, err } return end, nil } func (d *unmarshalTextDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: unmarshal text decoder does not support decode path") } func unquoteBytes(s []byte) (t []byte, ok bool) { //nolint: nonamedreturns length := len(s) if length < 2 || s[0] != '"' || s[length-1] != '"' { return } s = s[1 : length-1] length -= 2 // Check for unusual characters. If there are none, // then no unquoting is needed, so return a slice of the // original bytes. r := 0 for r < length { c := s[r] if c == '\\' || c == '"' || c < ' ' { break } if c < utf8.RuneSelf { r++ continue } rr, size := utf8.DecodeRune(s[r:]) if rr == utf8.RuneError && size == 1 { break } r += size } if r == length { return s, true } b := make([]byte, length+2*utf8.UTFMax) w := copy(b, s[0:r]) for r < length { // Out of room? Can only happen if s is full of // malformed UTF-8 and we're replacing each // byte with RuneError. 
if w >= len(b)-2*utf8.UTFMax { nb := make([]byte, (len(b)+utf8.UTFMax)*2) copy(nb, b[0:w]) b = nb } switch c := s[r]; { case c == '\\': r++ if r >= length { return } switch s[r] { default: return case '"', '\\', '/', '\'': b[w] = s[r] r++ w++ case 'b': b[w] = '\b' r++ w++ case 'f': b[w] = '\f' r++ w++ case 'n': b[w] = '\n' r++ w++ case 'r': b[w] = '\r' r++ w++ case 't': b[w] = '\t' r++ w++ case 'u': r-- rr := getu4(s[r:]) if rr < 0 { return } r += 6 if utf16.IsSurrogate(rr) { rr1 := getu4(s[r:]) if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { // A valid pair; consume. r += 6 w += utf8.EncodeRune(b[w:], dec) break } // Invalid surrogate; fall back to replacement rune. rr = unicode.ReplacementChar } w += utf8.EncodeRune(b[w:], rr) } // Quote, control characters are invalid. case c == '"', c < ' ': return // ASCII case c < utf8.RuneSelf: b[w] = c r++ w++ // Coerce to well-formed UTF-8. default: rr, size := utf8.DecodeRune(s[r:]) r += size w += utf8.EncodeRune(b[w:], rr) } } return b[0:w], true } func getu4(s []byte) rune { if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { return -1 } var r rune for _, c := range s[2:6] { switch { case '0' <= c && c <= '9': c = c - '0' case 'a' <= c && c <= 'f': c = c - 'a' + 10 case 'A' <= c && c <= 'F': c = c - 'A' + 10 default: return -1 } r = r*16 + rune(c) } return r }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/func.go
vendor/github.com/goccy/go-json/internal/decoder/func.go
package decoder import ( "bytes" "fmt" "unsafe" "github.com/goccy/go-json/internal/errors" "github.com/goccy/go-json/internal/runtime" ) type funcDecoder struct { typ *runtime.Type structName string fieldName string } func newFuncDecoder(typ *runtime.Type, structName, fieldName string) *funcDecoder { fnDecoder := &funcDecoder{typ, structName, fieldName} return fnDecoder } func (d *funcDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { s.skipWhiteSpace() start := s.cursor if err := s.skipValue(depth); err != nil { return err } src := s.buf[start:s.cursor] if len(src) > 0 { switch src[0] { case '"': return &errors.UnmarshalTypeError{ Value: "string", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case '[': return &errors.UnmarshalTypeError{ Value: "array", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case '{': return &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return &errors.UnmarshalTypeError{ Value: "number", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } case 'n': if err := nullBytes(s); err != nil { return err } *(*unsafe.Pointer)(p) = nil return nil case 't': if err := trueBytes(s); err == nil { return &errors.UnmarshalTypeError{ Value: "boolean", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } } case 'f': if err := falseBytes(s); err == nil { return &errors.UnmarshalTypeError{ Value: "boolean", Type: runtime.RType2Type(d.typ), Offset: s.totalOffset(), } } } } return errors.ErrInvalidBeginningOfValue(s.buf[s.cursor], s.totalOffset()) } func (d *funcDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { buf := ctx.Buf cursor = skipWhiteSpace(buf, cursor) start := cursor end, err := skipValue(buf, cursor, depth) if err != nil { return 0, err } src := buf[start:end] if len(src) > 0 { switch src[0] { case '"': return 0, 
&errors.UnmarshalTypeError{ Value: "string", Type: runtime.RType2Type(d.typ), Offset: start, } case '[': return 0, &errors.UnmarshalTypeError{ Value: "array", Type: runtime.RType2Type(d.typ), Offset: start, } case '{': return 0, &errors.UnmarshalTypeError{ Value: "object", Type: runtime.RType2Type(d.typ), Offset: start, } case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': return 0, &errors.UnmarshalTypeError{ Value: "number", Type: runtime.RType2Type(d.typ), Offset: start, } case 'n': if bytes.Equal(src, nullbytes) { *(*unsafe.Pointer)(p) = nil return end, nil } case 't': if err := validateTrue(buf, start); err == nil { return 0, &errors.UnmarshalTypeError{ Value: "boolean", Type: runtime.RType2Type(d.typ), Offset: start, } } case 'f': if err := validateFalse(buf, start); err == nil { return 0, &errors.UnmarshalTypeError{ Value: "boolean", Type: runtime.RType2Type(d.typ), Offset: start, } } } } return cursor, errors.ErrInvalidBeginningOfValue(buf[cursor], cursor) } func (d *funcDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: func decoder does not support decode path") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go
vendor/github.com/goccy/go-json/internal/decoder/wrapped_string.go
package decoder import ( "fmt" "reflect" "unsafe" "github.com/goccy/go-json/internal/runtime" ) type wrappedStringDecoder struct { typ *runtime.Type dec Decoder stringDecoder *stringDecoder structName string fieldName string isPtrType bool } func newWrappedStringDecoder(typ *runtime.Type, dec Decoder, structName, fieldName string) *wrappedStringDecoder { return &wrappedStringDecoder{ typ: typ, dec: dec, stringDecoder: newStringDecoder(structName, fieldName), structName: structName, fieldName: fieldName, isPtrType: typ.Kind() == reflect.Ptr, } } func (d *wrappedStringDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) error { bytes, err := d.stringDecoder.decodeStreamByte(s) if err != nil { return err } if bytes == nil { if d.isPtrType { *(*unsafe.Pointer)(p) = nil } return nil } b := make([]byte, len(bytes)+1) copy(b, bytes) if _, err := d.dec.Decode(&RuntimeContext{Buf: b}, 0, depth, p); err != nil { return err } return nil } func (d *wrappedStringDecoder) Decode(ctx *RuntimeContext, cursor, depth int64, p unsafe.Pointer) (int64, error) { bytes, c, err := d.stringDecoder.decodeByte(ctx.Buf, cursor) if err != nil { return 0, err } if bytes == nil { if d.isPtrType { *(*unsafe.Pointer)(p) = nil } return c, nil } bytes = append(bytes, nul) oldBuf := ctx.Buf ctx.Buf = bytes if _, err := d.dec.Decode(ctx, 0, depth, p); err != nil { return 0, err } ctx.Buf = oldBuf return c, nil } func (d *wrappedStringDecoder) DecodePath(ctx *RuntimeContext, cursor, depth int64) ([][]byte, int64, error) { return nil, 0, fmt.Errorf("json: wrapped string decoder does not support decode path") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
//go:build freebsd || openbsd || netbsd || dragonfly || darwin // +build freebsd openbsd netbsd dragonfly darwin // Note: the documentation on the Watcher type and methods is generated from // mkdoc.zsh package fsnotify import ( "errors" "fmt" "os" "path/filepath" "sync" "golang.org/x/sys/unix" ) // Watcher watches a set of paths, delivering events on a channel. // // A watcher should not be copied (e.g. pass it by pointer, rather than by // value). // // # Linux notes // // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // // fp := os.Open("file") // os.Remove("file") // Triggers Chmod // fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // // The fs.inotify.max_user_watches sysctl variable specifies the upper limit // for the number of watches per user, and fs.inotify.max_user_instances // specifies the maximum number of inotify instances per user. Every Watcher you // create is an "instance", and every path you add is a "watch". // // These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and // /proc/sys/fs/inotify/max_user_instances // // To increase them you can use sysctl or write the value to the /proc file: // // # Default values on Linux 5.18 // sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // // fs.inotify.max_user_watches=124983 // fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. 
// // # kqueue notes (macOS, BSD) // // kqueue requires opening a file descriptor for every file that's being watched; // so if you're watching a directory with five files then that's six file // descriptors. You will run in to your system's "max open files" limit faster on // these platforms. // // The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // // # Windows notes // // Paths can be added as "C:\path\to\dir", but forward slashes // ("C:/path/to/dir") will also work. // // When a watched directory is removed it will always send an event for the // directory itself, but may not send events for all files in that directory. // Sometimes it will send events for all times, sometimes it will send no // events, and often only for some files. // // The default ReadDirectoryChangesW() buffer size is 64K, which is the largest // value that is guaranteed to work with SMB filesystems. If you have many // events in quick succession this may not be enough, and you will have to use // [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // // fsnotify can send the following events; a "path" here can refer to a // file, directory, symbolic link, or special file like a FIFO. // // fsnotify.Create A new path was created; this may be followed by one // or more Write events if data also gets written to a // file. // // fsnotify.Remove A path was removed. // // fsnotify.Rename A path was renamed. A rename is always sent with the // old path as Event.Name, and a Create event will be // sent with the new name. Renames are only sent for // paths that are currently watched; e.g. moving an // unmonitored file into a monitored directory will // show up as just a Create. Similarly, renaming a file // to outside a monitored directory will show up as // only a Rename. 
// // fsnotify.Write A file or named pipe was written to. A Truncate will // also trigger a Write. A single "write action" // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program // you may get hundreds of Write events, and you may // want to wait until you've stopped receiving them // (see the dedup example in cmd/fsnotify). // // Some systems may send Write event for directories // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent // when a file is truncated. On Windows it's never // sent. Events chan Event // Errors sends any errors. // // ErrEventOverflow is used to indicate there are too many events: // // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. // - kqueue, fen: Not used. Errors chan error done chan struct{} kq int // File descriptor (as returned by the kqueue() syscall). closepipe [2]int // Pipe used for closing. mu sync.Mutex // Protects access to watcher data watches map[string]int // Watched file descriptors (key: path). watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). userWatches map[string]struct{} // Watches added with Watcher.Add() dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. paths map[int]pathInfo // File descriptors to path names for processing kqueue events. fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). isClosed bool // Set to true when Close() is first called } type pathInfo struct { name string isDir bool } // NewWatcher creates a new Watcher. 
func NewWatcher() (*Watcher, error) { return NewBufferedWatcher(0) } // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events // channel. // // The main use case for this is situations with a very large number of events // where the kernel buffer size can't be increased (e.g. due to lack of // permissions). An unbuffered Watcher will perform better for almost all use // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } w := &Watcher{ kq: kq, closepipe: closepipe, watches: make(map[string]int), watchesByDir: make(map[string]map[int]struct{}), dirFlags: make(map[string]uint32), paths: make(map[int]pathInfo), fileExists: make(map[string]struct{}), userWatches: make(map[string]struct{}), Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), } go w.readEvents() return w, nil } // newKqueue creates a new kernel event queue and returns a descriptor. // // This registers a new event on closepipe, which will trigger an event when // it's closed. This way we can use kevent() without timeout/polling; without // the closepipe, it would block forever and we wouldn't be able to stop it at // all. func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() if kq == -1 { return kq, closepipe, err } // Register the close pipe. err = unix.Pipe(closepipe[:]) if err != nil { unix.Close(kq) return kq, closepipe, err } // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) // SetKevent converts int to the platform-specific types. 
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) ok, err := unix.Kevent(kq, changes, nil, nil) if ok == -1 { unix.Close(kq) unix.Close(closepipe[0]) unix.Close(closepipe[1]) return kq, closepipe, err } return kq, closepipe, nil } // Returns true if the event was sent, or false if watcher is closed. func (w *Watcher) sendEvent(e Event) bool { select { case w.Events <- e: return true case <-w.done: return false } } // Returns true if the error was sent, or false if watcher is closed. func (w *Watcher) sendError(err error) bool { select { case w.Errors <- err: return true case <-w.done: return false } } // Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { w.mu.Lock() if w.isClosed { w.mu.Unlock() return nil } w.isClosed = true // copy paths to remove while locked pathsToRemove := make([]string, 0, len(w.watches)) for name := range w.watches { pathsToRemove = append(pathsToRemove, name) } w.mu.Unlock() // Unlock before calling Remove, which also locks for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) close(w.done) return nil } // Add starts monitoring the path for changes. // // A path can only be watched once; watching it more than once is a no-op and will // not return an error. Paths that do not yet exist on the filesystem cannot be // watched. // // A watch will be automatically removed if the watched path is deleted or // renamed. The exception is the Windows backend, which doesn't remove the // watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // // Returns [ErrClosed] if [Watcher.Close] was called. // // See [Watcher.AddWith] for a version that allows adding options. 
// // # Watching directories // // All files in a directory are monitored, including new files that are created // after the watcher is started. Subdirectories are not watched (i.e. it's // non-recursive). // // # Watching files // // Watching individual files (rather than directories) is generally not // recommended as many programs (especially editors) update files atomically: it // will write to a temporary file which is then moved to to destination, // overwriting the original (or some variant thereof). The watcher on the // original file is now lost, as that no longer exists. // // The upshot of this is that a power failure or crash won't leave a // half-written file. // // Watch the parent directory and use Event.Name to filter out files you're not // interested in. There is an example of this in cmd/fsnotify/file.go. func (w *Watcher) Add(name string) error { return w.AddWith(name) } // AddWith is like [Watcher.Add], but allows adding options. When using Add() // the defaults described below are used. // // Possible options are: // // - [WithBufferSize] sets the buffer size for the Windows backend; no-op on // other platforms. The default is 64K (65536 bytes). func (w *Watcher) AddWith(name string, opts ...addOpt) error { _ = getOptions(opts...) w.mu.Lock() w.userWatches[name] = struct{}{} w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) return err } // Remove stops monitoring the path for changes. // // Directories are always removed non-recursively. For example, if you added // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. // // Returns nil if [Watcher.Close] was called. 
func (w *Watcher) Remove(name string) error { return w.remove(name, true) } func (w *Watcher) remove(name string, unwatchFiles bool) error { name = filepath.Clean(name) w.mu.Lock() if w.isClosed { w.mu.Unlock() return nil } watchfd, ok := w.watches[name] w.mu.Unlock() if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } err := w.register([]int{watchfd}, unix.EV_DELETE, 0) if err != nil { return err } unix.Close(watchfd) w.mu.Lock() isDir := w.paths[watchfd].isDir delete(w.watches, name) delete(w.userWatches, name) parentName := filepath.Dir(name) delete(w.watchesByDir[parentName], watchfd) if len(w.watchesByDir[parentName]) == 0 { delete(w.watchesByDir, parentName) } delete(w.paths, watchfd) delete(w.dirFlags, name) delete(w.fileExists, name) w.mu.Unlock() // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { var pathsToRemove []string w.mu.Lock() for fd := range w.watchesByDir[name] { path := w.paths[fd] if _, ok := w.userWatches[path.name]; !ok { pathsToRemove = append(pathsToRemove, path.name) } } w.mu.Unlock() for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a // path they did not explicitly watch themselves. w.Remove(name) } } return nil } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // // Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { w.mu.Lock() defer w.mu.Unlock() if w.isClosed { return nil } entries := make([]string, 0, len(w.userWatches)) for pathname := range w.userWatches { entries = append(entries, pathname) } return entries } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME // addWatch adds name to the watched file set; the flags are interpreted as // described in kevent(2). 
// // Returns the real path to the file which was added, with symlinks resolved. func (w *Watcher) addWatch(name string, flags uint32) (string, error) { var isDir bool name = filepath.Clean(name) w.mu.Lock() if w.isClosed { w.mu.Unlock() return "", ErrClosed } watchfd, alreadyWatching := w.watches[name] // We already have a watch, but we can still override flags. if alreadyWatching { isDir = w.paths[watchfd].isDir } w.mu.Unlock() if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } // Don't watch sockets or named pipes if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } // Follow Symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { // Return nil because Linux can add unresolvable symlinks to the // watch list without problems, so maintain consistency with // that. There will be no file events for broken symlinks. // TODO: more specific check; returns os.PathError; ENOENT? return "", nil } w.mu.Lock() _, alreadyWatching = w.watches[link] w.mu.Unlock() if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. w.watches[name] = 0 w.fileExists[name] = struct{}{} return link, nil } name = link fi, err = os.Lstat(name) if err != nil { return "", nil } } // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
for { watchfd, err = unix.Open(name, openMode, 0) if err == nil { break } if errors.Is(err, unix.EINTR) { continue } return "", err } isDir = fi.IsDir() } err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { unix.Close(watchfd) return "", err } if !alreadyWatching { w.mu.Lock() parentName := filepath.Dir(name) w.watches[name] = watchfd watchesByDir, ok := w.watchesByDir[parentName] if !ok { watchesByDir = make(map[int]struct{}, 1) w.watchesByDir[parentName] = watchesByDir } watchesByDir[watchfd] = struct{}{} w.paths[watchfd] = pathInfo{name: name, isDir: isDir} w.mu.Unlock() } if isDir { // Watch the directory if it has not been watched before, or if it was // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) w.mu.Lock() watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) // Store flags so this watch can be updated later w.dirFlags[name] = flags w.mu.Unlock() if watchDir { if err := w.watchDirectoryFiles(name); err != nil { return "", err } } } return name, nil } // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. func (w *Watcher) readEvents() { defer func() { close(w.Events) close(w.Errors) _ = unix.Close(w.kq) unix.Close(w.closepipe[0]) }() eventBuffer := make([]unix.Kevent_t, 10) for closed := false; !closed; { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { closed = true } continue } // Flush the events we received to the Events channel for _, kevent := range kevents { var ( watchfd = int(kevent.Ident) mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. 
if watchfd == w.closepipe[0] { closed = true continue } w.mu.Lock() path := w.paths[watchfd] w.mu.Unlock() event := w.newEvent(path.name, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) w.mu.Lock() delete(w.fileExists, event.Name) w.mu.Unlock() } if path.isDir && event.Has(Write) && !event.Has(Remove) { w.sendDirectoryChangeEvents(event.Name) } else { if !w.sendEvent(event) { closed = true continue } } if event.Has(Remove) { // Look for a file that may have overwritten this; for example, // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) w.mu.Lock() _, found := w.watches[fileDir] w.mu.Unlock() if found { err := w.sendDirectoryChangeEvents(fileDir) if err != nil { if !w.sendError(err) { closed = true } } } } else { filePath := filepath.Clean(event.Name) if fi, err := os.Lstat(filePath); err == nil { err := w.sendFileCreatedEventIfNew(filePath, fi) if err != nil { if !w.sendError(err) { closed = true } } } } } } } } // newEvent returns an platform-independent Event based on kqueue Fflags. func (w *Watcher) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { e.Op |= Write } if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { e.Op |= Rename } if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { e.Op |= Chmod } // No point sending a write and delete event at the same time: if it's gone, // then it's gone. 
if e.Op.Has(Write) && e.Op.Has(Remove) { e.Op &^= Write } return e } // watchDirectoryFiles to mimic inotify when adding a watch on a directory func (w *Watcher) watchDirectoryFiles(dirPath string) error { // Get all files files, err := os.ReadDir(dirPath) if err != nil { return err } for _, f := range files { path := filepath.Join(dirPath, f.Name()) fi, err := f.Info() if err != nil { return fmt.Errorf("%q: %w", path, err) } cleanPath, err := w.internalWatch(path, fi) if err != nil { // No permission to read the file; that's not a problem: just skip. // But do add it to w.fileExists to prevent it from being picked up // as a "new" file later (it still shows up in the directory // listing). switch { case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM): cleanPath = filepath.Clean(path) default: return fmt.Errorf("%q: %w", path, err) } } w.mu.Lock() w.fileExists[cleanPath] = struct{}{} w.mu.Unlock() } return nil } // Search the directory for new files and send an event for them. // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. func (w *Watcher) sendDirectoryChangeEvents(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will // still give us the correct events. if errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) } } return nil } // sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { w.mu.Lock() _, doesExist := w.fileExists[filePath] w.mu.Unlock() if !doesExist { if !w.sendEvent(Event{Name: filePath, Op: Create}) { return } } // like watchDirectoryFiles (but without doing another ReadDir) filePath, err = w.internalWatch(filePath, fi) if err != nil { return err } w.mu.Lock() w.fileExists[filePath] = struct{}{} w.mu.Unlock() return nil } func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory w.mu.Lock() flags := w.dirFlags[name] w.mu.Unlock() flags |= unix.NOTE_DELETE | unix.NOTE_RENAME return w.addWatch(name, flags) } // watch file to mimic Linux inotify return w.addWatch(name, noteAllEvents) } // Register events with the queue. func (w *Watcher) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) changes[i].Fflags = fflags } // Register the events. success, err := unix.Kevent(w.kq, changes, nil, nil) if success == -1 { return err } return nil } // read retrieves pending events, or waits until an event occurs. func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/fsnotify.go
vendor/github.com/fsnotify/fsnotify/fsnotify.go
// Package fsnotify provides a cross-platform interface for file system // notifications. // // Currently supported systems: // // Linux 2.6.32+ via inotify // BSD, macOS via kqueue // Windows via ReadDirectoryChangesW // illumos via FEN package fsnotify import ( "errors" "fmt" "path/filepath" "strings" ) // Event represents a file system notification. type Event struct { // Path to the file or directory. // // Paths are relative to the input; for example with Add("dir") the Name // will be set to "dir/file" if you create that file, but if you use // Add("/path/to/dir") it will be "/path/to/dir/file". Name string // File operation that triggered the event. // // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op } // Op describes a set of file operations. type Op uint32 // The operations fsnotify can trigger; see the documentation on [Watcher] for a // full description, and check them with [Event.Has]. const ( // A new pathname was created. Create Op = 1 << iota // The pathname was written to; this does *not* mean the write has finished, // and a write can be followed by more writes. Write // The path was removed; any watches on it will be removed. Some "remove" // operations may trigger a Rename if the file is actually moved (for // example "remove to trash" is often a rename). Remove // The path was renamed to something else; any watched on it will be // removed. Rename // File attributes were changed. // // It's generally not recommended to take action on this event, as it may // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod ) // Common errors that can be reported. 
var ( ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") ErrClosed = errors.New("fsnotify: watcher already closed") ) func (o Op) String() string { var b strings.Builder if o.Has(Create) { b.WriteString("|CREATE") } if o.Has(Remove) { b.WriteString("|REMOVE") } if o.Has(Write) { b.WriteString("|WRITE") } if o.Has(Rename) { b.WriteString("|RENAME") } if o.Has(Chmod) { b.WriteString("|CHMOD") } if b.Len() == 0 { return "[no events]" } return b.String()[1:] } // Has reports if this operation has the given operation. func (o Op) Has(h Op) bool { return o&h != 0 } // Has reports if this event has the given operation. func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( addOpt func(opt *withOpts) withOpts struct { bufsize int } ) var defaultOpts = withOpts{ bufsize: 65536, // 64K } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { o(&with) } return with } // WithBufferSize sets the [ReadDirectoryChangesW] buffer size. // // This only has effect on Windows systems, and is a no-op for other backends. // // The default value is 64K (65536 bytes) which is the highest value that works // on all filesystems and should be enough for most applications, but if you // have a large burst of events it may not be enough. You can increase it if // you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). // // [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. 
func recursivePath(path string) (string, bool) { if filepath.Base(path) == "..." { return filepath.Dir(path), true } return path, false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/backend_other.go
vendor/github.com/fsnotify/fsnotify/backend_other.go
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) // +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows // Note: the documentation on the Watcher type and methods is generated from // mkdoc.zsh package fsnotify import "errors" // Watcher watches a set of paths, delivering events on a channel. // // A watcher should not be copied (e.g. pass it by pointer, rather than by // value). // // # Linux notes // // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // // fp := os.Open("file") // os.Remove("file") // Triggers Chmod // fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // // The fs.inotify.max_user_watches sysctl variable specifies the upper limit // for the number of watches per user, and fs.inotify.max_user_instances // specifies the maximum number of inotify instances per user. Every Watcher you // create is an "instance", and every path you add is a "watch". // // These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and // /proc/sys/fs/inotify/max_user_instances // // To increase them you can use sysctl or write the value to the /proc file: // // # Default values on Linux 5.18 // sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // // fs.inotify.max_user_watches=124983 // fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. 
// // # kqueue notes (macOS, BSD) // // kqueue requires opening a file descriptor for every file that's being watched; // so if you're watching a directory with five files then that's six file // descriptors. You will run in to your system's "max open files" limit faster on // these platforms. // // The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // // # Windows notes // // Paths can be added as "C:\path\to\dir", but forward slashes // ("C:/path/to/dir") will also work. // // When a watched directory is removed it will always send an event for the // directory itself, but may not send events for all files in that directory. // Sometimes it will send events for all times, sometimes it will send no // events, and often only for some files. // // The default ReadDirectoryChangesW() buffer size is 64K, which is the largest // value that is guaranteed to work with SMB filesystems. If you have many // events in quick succession this may not be enough, and you will have to use // [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // // fsnotify can send the following events; a "path" here can refer to a // file, directory, symbolic link, or special file like a FIFO. // // fsnotify.Create A new path was created; this may be followed by one // or more Write events if data also gets written to a // file. // // fsnotify.Remove A path was removed. // // fsnotify.Rename A path was renamed. A rename is always sent with the // old path as Event.Name, and a Create event will be // sent with the new name. Renames are only sent for // paths that are currently watched; e.g. moving an // unmonitored file into a monitored directory will // show up as just a Create. Similarly, renaming a file // to outside a monitored directory will show up as // only a Rename. 
// // fsnotify.Write A file or named pipe was written to. A Truncate will // also trigger a Write. A single "write action" // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program // you may get hundreds of Write events, and you may // want to wait until you've stopped receiving them // (see the dedup example in cmd/fsnotify). // // Some systems may send Write event for directories // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent // when a file is truncated. On Windows it's never // sent. Events chan Event // Errors sends any errors. // // ErrEventOverflow is used to indicate there are too many events: // // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. // - kqueue, fen: Not used. Errors chan error } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { return nil, errors.New("fsnotify not supported on the current platform") } // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events // channel. // // The main use case for this is situations with a very large number of events // where the kernel buffer size can't be increased (e.g. due to lack of // permissions). An unbuffered Watcher will perform better for almost all use // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } // Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { return nil } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). 
// // Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { return nil } // Add starts monitoring the path for changes. // // A path can only be watched once; watching it more than once is a no-op and will // not return an error. Paths that do not yet exist on the filesystem cannot be // watched. // // A watch will be automatically removed if the watched path is deleted or // renamed. The exception is the Windows backend, which doesn't remove the // watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // // Returns [ErrClosed] if [Watcher.Close] was called. // // See [Watcher.AddWith] for a version that allows adding options. // // # Watching directories // // All files in a directory are monitored, including new files that are created // after the watcher is started. Subdirectories are not watched (i.e. it's // non-recursive). // // # Watching files // // Watching individual files (rather than directories) is generally not // recommended as many programs (especially editors) update files atomically: it // will write to a temporary file which is then moved to to destination, // overwriting the original (or some variant thereof). The watcher on the // original file is now lost, as that no longer exists. // // The upshot of this is that a power failure or crash won't leave a // half-written file. // // Watch the parent directory and use Event.Name to filter out files you're not // interested in. There is an example of this in cmd/fsnotify/file.go. func (w *Watcher) Add(name string) error { return nil } // AddWith is like [Watcher.Add], but allows adding options. When using Add() // the defaults described below are used. // // Possible options are: // // - [WithBufferSize] sets the buffer size for the Windows backend; no-op on // other platforms. The default is 64K (65536 bytes). 
func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } // Remove stops monitoring the path for changes. // // Directories are always removed non-recursively. For example, if you added // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. // // Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
vendor/github.com/fsnotify/fsnotify/backend_inotify.go
//go:build linux && !appengine // +build linux,!appengine // Note: the documentation on the Watcher type and methods is generated from // mkdoc.zsh package fsnotify import ( "errors" "fmt" "io" "os" "path/filepath" "strings" "sync" "unsafe" "golang.org/x/sys/unix" ) // Watcher watches a set of paths, delivering events on a channel. // // A watcher should not be copied (e.g. pass it by pointer, rather than by // value). // // # Linux notes // // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // // fp := os.Open("file") // os.Remove("file") // Triggers Chmod // fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // // The fs.inotify.max_user_watches sysctl variable specifies the upper limit // for the number of watches per user, and fs.inotify.max_user_instances // specifies the maximum number of inotify instances per user. Every Watcher you // create is an "instance", and every path you add is a "watch". // // These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and // /proc/sys/fs/inotify/max_user_instances // // To increase them you can use sysctl or write the value to the /proc file: // // # Default values on Linux 5.18 // sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // // fs.inotify.max_user_watches=124983 // fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. // // # kqueue notes (macOS, BSD) // // kqueue requires opening a file descriptor for every file that's being watched; // so if you're watching a directory with five files then that's six file // descriptors. 
You will run in to your system's "max open files" limit faster on // these platforms. // // The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // // # Windows notes // // Paths can be added as "C:\path\to\dir", but forward slashes // ("C:/path/to/dir") will also work. // // When a watched directory is removed it will always send an event for the // directory itself, but may not send events for all files in that directory. // Sometimes it will send events for all times, sometimes it will send no // events, and often only for some files. // // The default ReadDirectoryChangesW() buffer size is 64K, which is the largest // value that is guaranteed to work with SMB filesystems. If you have many // events in quick succession this may not be enough, and you will have to use // [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // // fsnotify can send the following events; a "path" here can refer to a // file, directory, symbolic link, or special file like a FIFO. // // fsnotify.Create A new path was created; this may be followed by one // or more Write events if data also gets written to a // file. // // fsnotify.Remove A path was removed. // // fsnotify.Rename A path was renamed. A rename is always sent with the // old path as Event.Name, and a Create event will be // sent with the new name. Renames are only sent for // paths that are currently watched; e.g. moving an // unmonitored file into a monitored directory will // show up as just a Create. Similarly, renaming a file // to outside a monitored directory will show up as // only a Rename. // // fsnotify.Write A file or named pipe was written to. A Truncate will // also trigger a Write. A single "write action" // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. 
For example when compiling a large Go program // you may get hundreds of Write events, and you may // want to wait until you've stopped receiving them // (see the dedup example in cmd/fsnotify). // // Some systems may send Write event for directories // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent // when a file is truncated. On Windows it's never // sent. Events chan Event // Errors sends any errors. // // ErrEventOverflow is used to indicate there are too many events: // // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after // calling Fd(). See: https://github.com/golang/go/issues/26439 fd int inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine closeMu sync.Mutex doneResp chan struct{} // Channel to respond to Close } type ( watches struct { mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } watch struct { wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) path string // Watch path. 
} ) func newWatches() *watches { return &watches{ wd: make(map[uint32]*watch), path: make(map[string]uint32), } } func (w *watches) len() int { w.mu.RLock() defer w.mu.RUnlock() return len(w.wd) } func (w *watches) add(ww *watch) { w.mu.Lock() defer w.mu.Unlock() w.wd[ww.wd] = ww w.path[ww.path] = ww.wd } func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() delete(w.path, w.wd[wd].path) delete(w.wd, wd) } func (w *watches) removePath(path string) (uint32, bool) { w.mu.Lock() defer w.mu.Unlock() wd, ok := w.path[path] if !ok { return 0, false } delete(w.path, path) delete(w.wd, wd) return wd, true } func (w *watches) byPath(path string) *watch { w.mu.RLock() defer w.mu.RUnlock() return w.wd[w.path[path]] } func (w *watches) byWd(wd uint32) *watch { w.mu.RLock() defer w.mu.RUnlock() return w.wd[wd] } func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { w.mu.Lock() defer w.mu.Unlock() var existing *watch wd, ok := w.path[path] if ok { existing = w.wd[wd] } upd, err := f(existing) if err != nil { return err } if upd != nil { w.wd[upd.wd] = upd w.path[upd.path] = upd.wd if upd.wd != wd { delete(w.wd, wd) } } return nil } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { return NewBufferedWatcher(0) } // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events // channel. // // The main use case for this is situations with a very large number of events // where the kernel buffer size can't be increased (e.g. due to lack of // permissions). An unbuffered Watcher will perform better for almost all use // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. 
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) if fd == -1 { return nil, errno } w := &Watcher{ fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), Events: make(chan Event, sz), Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } go w.readEvents() return w, nil } // Returns true if the event was sent, or false if watcher is closed. func (w *Watcher) sendEvent(e Event) bool { select { case w.Events <- e: return true case <-w.done: return false } } // Returns true if the error was sent, or false if watcher is closed. func (w *Watcher) sendError(err error) bool { select { case w.Errors <- err: return true case <-w.done: return false } } func (w *Watcher) isClosed() bool { select { case <-w.done: return true default: return false } } // Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { w.closeMu.Lock() if w.isClosed() { w.closeMu.Unlock() return nil } close(w.done) w.closeMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. err := w.inotifyFile.Close() if err != nil { return err } // Wait for goroutine to close <-w.doneResp return nil } // Add starts monitoring the path for changes. // // A path can only be watched once; watching it more than once is a no-op and will // not return an error. Paths that do not yet exist on the filesystem cannot be // watched. // // A watch will be automatically removed if the watched path is deleted or // renamed. The exception is the Windows backend, which doesn't remove the // watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // // Returns [ErrClosed] if [Watcher.Close] was called. // // See [Watcher.AddWith] for a version that allows adding options. 
// // # Watching directories // // All files in a directory are monitored, including new files that are created // after the watcher is started. Subdirectories are not watched (i.e. it's // non-recursive). // // # Watching files // // Watching individual files (rather than directories) is generally not // recommended as many programs (especially editors) update files atomically: it // will write to a temporary file which is then moved to to destination, // overwriting the original (or some variant thereof). The watcher on the // original file is now lost, as that no longer exists. // // The upshot of this is that a power failure or crash won't leave a // half-written file. // // Watch the parent directory and use Event.Name to filter out files you're not // interested in. There is an example of this in cmd/fsnotify/file.go. func (w *Watcher) Add(name string) error { return w.AddWith(name) } // AddWith is like [Watcher.Add], but allows adding options. When using Add() // the defaults described below are used. // // Possible options are: // // - [WithBufferSize] sets the buffer size for the Windows backend; no-op on // other platforms. The default is 64K (65536 bytes). func (w *Watcher) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } name = filepath.Clean(name) _ = getOptions(opts...) var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF return w.watches.updatePath(name, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } wd, err := unix.InotifyAddWatch(w.fd, name, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ wd: uint32(wd), path: name, flags: flags, }, nil } existing.wd = uint32(wd) existing.flags = flags return existing, nil }) } // Remove stops monitoring the path for changes. // // Directories are always removed non-recursively. 
For example, if you added // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. // // Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { if w.isClosed() { return nil } return w.remove(filepath.Clean(name)) } func (w *Watcher) remove(name string) error { wd, ok := w.watches.removePath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } success, errno := unix.InotifyRmWatch(w.fd, wd) if success == -1 { // TODO: Perhaps it's not helpful to return an error here in every case; // The only two possible errors are: // // - EBADF, which happens when w.fd is not a valid file descriptor // of any kind. // - EINVAL, which is when fd is not an inotify descriptor or wd // is not a valid watch descriptor. Watch descriptors are // invalidated when they are removed explicitly or implicitly; // explicitly by inotify_rm_watch, implicitly when the file they // are watching is deleted. return errno } return nil } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // // Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { if w.isClosed() { return nil } entries := make([]string, 0, w.watches.len()) w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } w.watches.mu.RUnlock() return entries } // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel func (w *Watcher) readEvents() { defer func() { close(w.doneResp) close(w.Errors) close(w.Events) }() var ( buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events errno error // Syscall errno ) for { // See if we have been closed. 
if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) switch { case errors.Unwrap(err) == os.ErrClosed: return case err != nil: if !w.sendError(err) { return } continue } if n < unix.SizeofInotifyEvent { var err error if n == 0 { err = io.EOF // If EOF is received. This should really never happen. } else if n < 0 { err = errno // If an error occurred while reading. } else { err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return } continue } var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) ) if mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } // If the event happened to the watched directory or the watched file, the kernel // doesn't append the filename to the event, but we would like to always fill the // the "Name" field with a valid filename. We retrieve the path of the watch from // the "paths" map. watch := w.watches.byWd(uint32(raw.Wd)) // inotify will automatically remove the watch on deletes; just need // to clean our state here. if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. 
if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { return } } } var name string if watch != nil { name = watch.path } if nameLen > 0 { // Point "bytes" at the first byte of the filename bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] // The filename is padded with NULL bytes. TrimRight() gets rid of those. name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") } event := w.newEvent(name, mask) // Send the events that are not ignored on the events channel if mask&unix.IN_IGNORED == 0 { if !w.sendEvent(event) { return } } // Move to the next event in the buffer offset += unix.SizeofInotifyEvent + nameLen } } } // newEvent returns an platform-independent Event based on an inotify mask. func (w *Watcher) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create } if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { e.Op |= Remove } if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } return e }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/system_bsd.go
vendor/github.com/fsnotify/fsnotify/system_bsd.go
//go:build freebsd || openbsd || netbsd || dragonfly // +build freebsd openbsd netbsd dragonfly package fsnotify import "golang.org/x/sys/unix" const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/backend_fen.go
vendor/github.com/fsnotify/fsnotify/backend_fen.go
//go:build solaris // +build solaris // Note: the documentation on the Watcher type and methods is generated from // mkdoc.zsh package fsnotify import ( "errors" "fmt" "os" "path/filepath" "sync" "golang.org/x/sys/unix" ) // Watcher watches a set of paths, delivering events on a channel. // // A watcher should not be copied (e.g. pass it by pointer, rather than by // value). // // # Linux notes // // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // // fp := os.Open("file") // os.Remove("file") // Triggers Chmod // fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // // The fs.inotify.max_user_watches sysctl variable specifies the upper limit // for the number of watches per user, and fs.inotify.max_user_instances // specifies the maximum number of inotify instances per user. Every Watcher you // create is an "instance", and every path you add is a "watch". // // These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and // /proc/sys/fs/inotify/max_user_instances // // To increase them you can use sysctl or write the value to the /proc file: // // # Default values on Linux 5.18 // sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // // fs.inotify.max_user_watches=124983 // fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. // // # kqueue notes (macOS, BSD) // // kqueue requires opening a file descriptor for every file that's being watched; // so if you're watching a directory with five files then that's six file // descriptors. 
You will run in to your system's "max open files" limit faster on // these platforms. // // The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // // # Windows notes // // Paths can be added as "C:\path\to\dir", but forward slashes // ("C:/path/to/dir") will also work. // // When a watched directory is removed it will always send an event for the // directory itself, but may not send events for all files in that directory. // Sometimes it will send events for all times, sometimes it will send no // events, and often only for some files. // // The default ReadDirectoryChangesW() buffer size is 64K, which is the largest // value that is guaranteed to work with SMB filesystems. If you have many // events in quick succession this may not be enough, and you will have to use // [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // // fsnotify can send the following events; a "path" here can refer to a // file, directory, symbolic link, or special file like a FIFO. // // fsnotify.Create A new path was created; this may be followed by one // or more Write events if data also gets written to a // file. // // fsnotify.Remove A path was removed. // // fsnotify.Rename A path was renamed. A rename is always sent with the // old path as Event.Name, and a Create event will be // sent with the new name. Renames are only sent for // paths that are currently watched; e.g. moving an // unmonitored file into a monitored directory will // show up as just a Create. Similarly, renaming a file // to outside a monitored directory will show up as // only a Rename. // // fsnotify.Write A file or named pipe was written to. A Truncate will // also trigger a Write. A single "write action" // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. 
For example when compiling a large Go program // you may get hundreds of Write events, and you may // want to wait until you've stopped receiving them // (see the dedup example in cmd/fsnotify). // // Some systems may send Write event for directories // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent // when a file is truncated. On Windows it's never // sent. Events chan Event // Errors sends any errors. // // ErrEventOverflow is used to indicate there are too many events: // // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort done chan struct{} // Channel for sending a "quit message" to the reader goroutine dirs map[string]struct{} // Explicitly watched directories watches map[string]struct{} // Explicitly watched non-directories } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { return NewBufferedWatcher(0) } // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events // channel. // // The main use case for this is situations with a very large number of events // where the kernel buffer size can't be increased (e.g. due to lack of // permissions). An unbuffered Watcher will perform better for almost all use // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. 
func NewBufferedWatcher(sz uint) (*Watcher, error) { w := &Watcher{ Events: make(chan Event, sz), Errors: make(chan error), dirs: make(map[string]struct{}), watches: make(map[string]struct{}), done: make(chan struct{}), } var err error w.port, err = unix.NewEventPort() if err != nil { return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) } go w.readEvents() return w, nil } // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. func (w *Watcher) sendEvent(name string, op Op) (sent bool) { select { case w.Events <- Event{Name: name, Op: op}: return true case <-w.done: return false } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. func (w *Watcher) sendError(err error) (sent bool) { select { case w.Errors <- err: return true case <-w.done: return false } } func (w *Watcher) isClosed() bool { select { case <-w.done: return true default: return false } } // Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() defer w.mu.Unlock() if w.isClosed() { return nil } close(w.done) return w.port.Close() } // Add starts monitoring the path for changes. // // A path can only be watched once; watching it more than once is a no-op and will // not return an error. Paths that do not yet exist on the filesystem cannot be // watched. // // A watch will be automatically removed if the watched path is deleted or // renamed. The exception is the Windows backend, which doesn't remove the // watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // // Returns [ErrClosed] if [Watcher.Close] was called. 
// // See [Watcher.AddWith] for a version that allows adding options. // // # Watching directories // // All files in a directory are monitored, including new files that are created // after the watcher is started. Subdirectories are not watched (i.e. it's // non-recursive). // // # Watching files // // Watching individual files (rather than directories) is generally not // recommended as many programs (especially editors) update files atomically: it // will write to a temporary file which is then moved to to destination, // overwriting the original (or some variant thereof). The watcher on the // original file is now lost, as that no longer exists. // // The upshot of this is that a power failure or crash won't leave a // half-written file. // // Watch the parent directory and use Event.Name to filter out files you're not // interested in. There is an example of this in cmd/fsnotify/file.go. func (w *Watcher) Add(name string) error { return w.AddWith(name) } // AddWith is like [Watcher.Add], but allows adding options. When using Add() // the defaults described below are used. // // Possible options are: // // - [WithBufferSize] sets the buffer size for the Windows backend; no-op on // other platforms. The default is 64K (65536 bytes). func (w *Watcher) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } if w.port.PathIsWatched(name) { return nil } _ = getOptions(opts...) // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. stat, err := os.Stat(name) if err != nil { return err } // Associate all files in the directory. if stat.IsDir() { err := w.handleDirectory(name, stat, true, w.associateFile) if err != nil { return err } w.mu.Lock() w.dirs[name] = struct{}{} w.mu.Unlock() return nil } err = w.associateFile(name, stat, true) if err != nil { return err } w.mu.Lock() w.watches[name] = struct{}{} w.mu.Unlock() return nil } // Remove stops monitoring the path for changes. 
// // Directories are always removed non-recursively. For example, if you added // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. // // Returns nil if [Watcher.Close] was called. func (w *Watcher) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete // doesn't cause harm. w.mu.Lock() delete(w.watches, name) delete(w.dirs, name) w.mu.Unlock() stat, err := os.Stat(name) if err != nil { return err } // Remove associations for every file in the directory. if stat.IsDir() { err := w.handleDirectory(name, stat, false, w.dissociateFile) if err != nil { return err } return nil } err = w.port.DissociatePath(name) if err != nil { return err } return nil } // readEvents contains the main loop that runs in a goroutine watching for events. func (w *Watcher) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { close(w.Errors) close(w.Events) }() pevents := make([]unix.PortEvent, 8) for { count, err := w.port.Get(pevents, 1, nil) if err != nil && err != unix.ETIME { // Interrupted system call (count should be 0) ignore and continue if errors.Is(err, unix.EINTR) && count == 0 { continue } // Get failed because we called w.Close() if errors.Is(err, unix.EBADF) && w.isClosed() { return } // There was an error not caused by calling w.Close() if !w.sendError(err) { return } } p := pevents[:count] for _, pevent := range p { if pevent.Source != unix.PORT_SOURCE_FILE { // Event from unexpected source received; should never happen. 
if !w.sendError(errors.New("Event from unexpected source received")) { return } continue } err = w.handleEvent(&pevent) if err != nil { if !w.sendError(err) { return } } } } } func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err } // Handle all children of the directory. for _, entry := range files { finfo, err := entry.Info() if err != nil { return err } err = handler(filepath.Join(path, finfo.Name()), finfo, false) if err != nil { return err } } // And finally handle the directory itself. return handler(path, stat, follow) } // handleEvent might need to emit more than one fsnotify event if the events // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) func (w *Watcher) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path fmode = event.Cookie.(os.FileMode) reRegister = true ) w.mu.Lock() _, watchedDir := w.dirs[path] _, watchedPath := w.watches[path] w.mu.Unlock() isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { if !w.sendEvent(path, Remove) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { if !w.sendEvent(path, Rename) { return nil } // Don't keep watching the new file name reRegister = false } if events&unix.FILE_RENAME_TO != 0 { // We don't report a Rename event for this case, because Rename events // are interpreted as referring to the _old_ name of the file, and in // this case the event would refer to the new name of the file. This // type of rename event is not supported by fsnotify. // inotify reports a Remove event in this case, so we simulate this // here. 
if !w.sendEvent(path, Remove) { return nil } // Don't keep watching the file that was removed reRegister = false } // The file is gone, nothing left to do. if !reRegister { if watchedDir { w.mu.Lock() delete(w.dirs, path) w.mu.Unlock() } if watchedPath { w.mu.Lock() delete(w.watches, path) w.mu.Unlock() } return nil } // If we didn't get a deletion the file still exists and we're going to have // to watch it again. Let's Stat it now so that we can compare permissions // and have what we need to continue watching the file stat, err := os.Lstat(path) if err != nil { // This is unexpected, but we should still emit an event. This happens // most often on "rm -r" of a subdirectory inside a watched directory We // get a modify event of something happening inside, but by the time we // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. if !w.sendEvent(path, Remove) { return nil } // Suppress extra write events on removed directories; they are not // informative and can be confusing. return nil } // resolve symlinks that were explicitly watched as we would have at Add() // time. this helps suppress spurious Chmod events on watched symlinks if isWatched { stat, err = os.Stat(path) if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. 
if !w.sendEvent(path, Remove) { return nil } // Don't return the error } } if events&unix.FILE_MODIFIED != 0 { if fmode.IsDir() { if watchedDir { if err := w.updateDirectory(path); err != nil { return err } } else { if !w.sendEvent(path, Write) { return nil } } } else { if !w.sendEvent(path, Write) { return nil } } } if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { if !w.sendEvent(path, Chmod) { return nil } } } if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory return w.associateFile(path, stat, isWatched) } return nil } func (w *Watcher) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. files, err := os.ReadDir(path) if err != nil { return err } for _, entry := range files { path := filepath.Join(path, entry.Name()) if w.port.PathIsWatched(path) { continue } finfo, err := entry.Info() if err != nil { return err } err = w.associateFile(path, finfo, false) if err != nil { if !w.sendError(err) { return nil } } if !w.sendEvent(path, Create) { return nil } } return nil } func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } // This is primarily protecting the call to AssociatePath but it is // important and intentional that the call to PathIsWatched is also // protected by this mutex. Without this mutex, AssociatePath has been seen // to error out that the path is already associated. w.mu.Lock() defer w.mu.Unlock() if w.port.PathIsWatched(path) { // Remove the old association in favor of this one If we get ENOENT, // then while the x/sys/unix wrapper still thought that this path was // associated, the underlying event port did not. 
This call will have // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) if err != nil && err != unix.ENOENT { return err } } // FILE_NOFOLLOW means we watch symlinks themselves rather than their // targets. events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW if follow { // We *DO* follow symlinks for explicitly watched entries. events = unix.FILE_MODIFIED | unix.FILE_ATTRIB } return w.port.AssociatePath(path, stat, events, stat.Mode()) } func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // // Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { if w.isClosed() { return nil } w.mu.Lock() defer w.mu.Unlock() entries := make([]string, 0, len(w.watches)+len(w.dirs)) for pathname := range w.dirs { entries = append(entries, pathname) } for pathname := range w.watches { entries = append(entries, pathname) } return entries }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/system_darwin.go
vendor/github.com/fsnotify/fsnotify/system_darwin.go
//go:build darwin // +build darwin package fsnotify import "golang.org/x/sys/unix" // note: this constant is not defined on BSD const openMode = unix.O_EVTONLY | unix.O_CLOEXEC
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/fsnotify/fsnotify/backend_windows.go
vendor/github.com/fsnotify/fsnotify/backend_windows.go
//go:build windows // +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw // // Note: the documentation on the Watcher type and methods is generated from // mkdoc.zsh package fsnotify import ( "errors" "fmt" "os" "path/filepath" "reflect" "runtime" "strings" "sync" "unsafe" "golang.org/x/sys/windows" ) // Watcher watches a set of paths, delivering events on a channel. // // A watcher should not be copied (e.g. pass it by pointer, rather than by // value). // // # Linux notes // // When a file is removed a Remove event won't be emitted until all file // descriptors are closed, and deletes will always emit a Chmod. For example: // // fp := os.Open("file") // os.Remove("file") // Triggers Chmod // fp.Close() // Triggers Remove // // This is the event that inotify sends, so not much can be changed about this. // // The fs.inotify.max_user_watches sysctl variable specifies the upper limit // for the number of watches per user, and fs.inotify.max_user_instances // specifies the maximum number of inotify instances per user. Every Watcher you // create is an "instance", and every path you add is a "watch". // // These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and // /proc/sys/fs/inotify/max_user_instances // // To increase them you can use sysctl or write the value to the /proc file: // // # Default values on Linux 5.18 // sysctl fs.inotify.max_user_watches=124983 // sysctl fs.inotify.max_user_instances=128 // // To make the changes persist on reboot edit /etc/sysctl.conf or // /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check // your distro's documentation): // // fs.inotify.max_user_watches=124983 // fs.inotify.max_user_instances=128 // // Reaching the limit will result in a "no space left on device" or "too many open // files" error. 
// // # kqueue notes (macOS, BSD) // // kqueue requires opening a file descriptor for every file that's being watched; // so if you're watching a directory with five files then that's six file // descriptors. You will run in to your system's "max open files" limit faster on // these platforms. // // The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to // control the maximum number of open files, as well as /etc/login.conf on BSD // systems. // // # Windows notes // // Paths can be added as "C:\path\to\dir", but forward slashes // ("C:/path/to/dir") will also work. // // When a watched directory is removed it will always send an event for the // directory itself, but may not send events for all files in that directory. // Sometimes it will send events for all times, sometimes it will send no // events, and often only for some files. // // The default ReadDirectoryChangesW() buffer size is 64K, which is the largest // value that is guaranteed to work with SMB filesystems. If you have many // events in quick succession this may not be enough, and you will have to use // [WithBufferSize] to increase the value. type Watcher struct { // Events sends the filesystem change events. // // fsnotify can send the following events; a "path" here can refer to a // file, directory, symbolic link, or special file like a FIFO. // // fsnotify.Create A new path was created; this may be followed by one // or more Write events if data also gets written to a // file. // // fsnotify.Remove A path was removed. // // fsnotify.Rename A path was renamed. A rename is always sent with the // old path as Event.Name, and a Create event will be // sent with the new name. Renames are only sent for // paths that are currently watched; e.g. moving an // unmonitored file into a monitored directory will // show up as just a Create. Similarly, renaming a file // to outside a monitored directory will show up as // only a Rename. 
// // fsnotify.Write A file or named pipe was written to. A Truncate will // also trigger a Write. A single "write action" // initiated by the user may show up as one or multiple // writes, depending on when the system syncs things to // disk. For example when compiling a large Go program // you may get hundreds of Write events, and you may // want to wait until you've stopped receiving them // (see the dedup example in cmd/fsnotify). // // Some systems may send Write event for directories // when the directory content changes. // // fsnotify.Chmod Attributes were changed. On Linux this is also sent // when a file is removed (or more accurately, when a // link to an inode is removed). On kqueue it's sent // when a file is truncated. On Windows it's never // sent. Events chan Event // Errors sends any errors. // // ErrEventOverflow is used to indicate there are too many events: // // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel quit chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { return NewBufferedWatcher(50) } // NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events // channel. // // The main use case for this is situations with a very large number of events // where the kernel buffer size can't be increased (e.g. due to lack of // permissions). An unbuffered Watcher will perform better for almost all use // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. 
func NewBufferedWatcher(sz uint) (*Watcher, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } w := &Watcher{ port: port, watches: make(watchMap), input: make(chan *input, 1), Events: make(chan Event, sz), Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } func (w *Watcher) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } func (w *Watcher) sendEvent(name string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) select { case ch := <-w.quit: w.quit <- ch case w.Events <- event: } return true } // Returns true if the error was sent, or false if watcher is closed. func (w *Watcher) sendError(err error) bool { select { case w.Errors <- err: return true case <-w.quit: } return false } // Close removes all watches and closes the Events channel. func (w *Watcher) Close() error { if w.isClosed() { return nil } w.mu.Lock() w.closed = true w.mu.Unlock() // Send "quit" message to the reader goroutine ch := make(chan error) w.quit <- ch if err := w.wakeupReader(); err != nil { return err } return <-ch } // Add starts monitoring the path for changes. // // A path can only be watched once; watching it more than once is a no-op and will // not return an error. Paths that do not yet exist on the filesystem cannot be // watched. // // A watch will be automatically removed if the watched path is deleted or // renamed. The exception is the Windows backend, which doesn't remove the // watcher on renames. // // Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special // filesystems (/proc, /sys, etc.) generally don't work. // // Returns [ErrClosed] if [Watcher.Close] was called. // // See [Watcher.AddWith] for a version that allows adding options. 
// // # Watching directories // // All files in a directory are monitored, including new files that are created // after the watcher is started. Subdirectories are not watched (i.e. it's // non-recursive). // // # Watching files // // Watching individual files (rather than directories) is generally not // recommended as many programs (especially editors) update files atomically: it // will write to a temporary file which is then moved to to destination, // overwriting the original (or some variant thereof). The watcher on the // original file is now lost, as that no longer exists. // // The upshot of this is that a power failure or crash won't leave a // half-written file. // // Watch the parent directory and use Event.Name to filter out files you're not // interested in. There is an example of this in cmd/fsnotify/file.go. func (w *Watcher) Add(name string) error { return w.AddWith(name) } // AddWith is like [Watcher.Add], but allows adding options. When using Add() // the defaults described below are used. // // Possible options are: // // - [WithBufferSize] sets the buffer size for the Windows backend; no-op on // other platforms. The default is 64K (65536 bytes). func (w *Watcher) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } with := getOptions(opts...) if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } in := &input{ op: opAddWatch, path: filepath.Clean(name), flags: sysFSALLEVENTS, reply: make(chan error), bufsize: with.bufsize, } w.input <- in if err := w.wakeupReader(); err != nil { return err } return <-in.reply } // Remove stops monitoring the path for changes. // // Directories are always removed non-recursively. For example, if you added // /tmp/dir and /tmp/dir/subdir then you will need to remove both. // // Removing a path that has not yet been added returns [ErrNonExistentWatch]. // // Returns nil if [Watcher.Close] was called. 
func (w *Watcher) Remove(name string) error { if w.isClosed() { return nil } in := &input{ op: opRemoveWatch, path: filepath.Clean(name), reply: make(chan error), } w.input <- in if err := w.wakeupReader(); err != nil { return err } return <-in.reply } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // // Returns nil if [Watcher.Close] was called. func (w *Watcher) WatchList() []string { if w.isClosed() { return nil } w.mu.Lock() defer w.mu.Unlock() entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { entries = append(entries, watchEntry.path) } } return entries } // These options are from the old golang.org/x/exp/winfsnotify, where you could // add various options to the watch. This has long since been removed. // // The "sys" in the name is misleading as they're not part of any "system". // // This should all be removed at some point, and just use windows.FILE_NOTIFY_* const ( sysFSALLEVENTS = 0xfff sysFSCREATE = 0x100 sysFSDELETE = 0x200 sysFSDELETESELF = 0x400 sysFSMODIFY = 0x2 sysFSMOVE = 0xc0 sysFSMOVEDFROM = 0x40 sysFSMOVEDTO = 0x80 sysFSMOVESELF = 0x800 sysFSIGNORED = 0x8000 ) func (w *Watcher) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create } if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { e.Op |= Remove } if mask&sysFSMODIFY == sysFSMODIFY { e.Op |= Write } if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { e.Op |= Rename } return e } const ( opAddWatch = iota opRemoveWatch ) const ( provisional uint64 = 1 << (32 + iota) ) type input struct { op int path string flags uint32 bufsize int reply chan error } type inode struct { handle windows.Handle volume uint32 index uint64 } type watch struct { ov windows.Overlapped ino *inode // i-number recurse bool 
// Recursive watch? path string // Directory path mask uint64 // Directory itself is being watched with these notify flags names map[string]uint64 // Map of names being watched and their notify flags rename string // Remembers the old name while renaming a file buf []byte // buffer, allocated later } type ( indexMap map[uint64]*watch watchMap map[uint32]indexMap ) func (w *Watcher) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) } return nil } func (w *Watcher) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) } if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { dir = pathname } else { dir, _ = filepath.Split(pathname) dir = filepath.Clean(dir) } return } func (w *Watcher) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) if err != nil { return nil, os.NewSyscallError("CreateFile", err) } var fi windows.ByHandleFileInformation err = windows.GetFileInformationByHandle(h, &fi) if err != nil { windows.CloseHandle(h) return nil, os.NewSyscallError("GetFileInformationByHandle", err) } ino = &inode{ handle: h, volume: fi.VolumeSerialNumber, index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), } return ino, nil } // Must run within the I/O thread. func (m watchMap) get(ino *inode) *watch { if i := m[ino.volume]; i != nil { return i[ino.index] } return nil } // Must run within the I/O thread. 
func (m watchMap) set(ino *inode, watch *watch) { i := m[ino.volume] if i == nil { i = make(indexMap) m[ino.volume] = i } i[ino.index] = watch } // Must run within the I/O thread. func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { //pathname, recurse := recursivePath(pathname) recurse := false dir, err := w.getDir(pathname) if err != nil { return err } ino, err := w.getIno(dir) if err != nil { return err } w.mu.Lock() watchEntry := w.watches.get(ino) w.mu.Unlock() if watchEntry == nil { _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0) if err != nil { windows.CloseHandle(ino.handle) return os.NewSyscallError("CreateIoCompletionPort", err) } watchEntry = &watch{ ino: ino, path: dir, names: make(map[string]uint64), recurse: recurse, buf: make([]byte, bufsize), } w.mu.Lock() w.watches.set(ino, watchEntry) w.mu.Unlock() flags |= provisional } else { windows.CloseHandle(ino.handle) } if pathname == dir { watchEntry.mask |= flags } else { watchEntry.names[filepath.Base(pathname)] |= flags } err = w.startRead(watchEntry) if err != nil { return err } if pathname == dir { watchEntry.mask &= ^provisional } else { watchEntry.names[filepath.Base(pathname)] &= ^provisional } return nil } // Must run within the I/O thread. func (w *Watcher) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { return err } ino, err := w.getIno(dir) if err != nil { return err } w.mu.Lock() watch := w.watches.get(ino) w.mu.Unlock() if recurse && !watch.recurse { return fmt.Errorf("can't use \\... 
with non-recursive watch %q", pathname) } err = windows.CloseHandle(ino.handle) if err != nil { w.sendError(os.NewSyscallError("CloseHandle", err)) } if watch == nil { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { w.sendEvent(watch.path, watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) delete(watch.names, name) } return w.startRead(watch) } // Must run within the I/O thread. func (w *Watcher) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { w.sendEvent(watch.path, watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. func (w *Watcher) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) w.deleteWatch(watch) } mask := w.toWindowsFlags(watch.mask) for _, m := range watch.names { mask |= w.toWindowsFlags(m) } if mask == 0 { err := windows.CloseHandle(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CloseHandle", err)) } w.mu.Lock() delete(w.watches[watch.ino.volume], watch.ino.index) w.mu.Unlock() return nil } // We need to pass the array, rather than the slice. 
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf)) rdErr := windows.ReadDirectoryChanges(watch.ino.handle, (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len), watch.recurse, mask, nil, &watch.ov, 0) if rdErr != nil { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) w.startRead(watch) return err } return nil } // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. func (w *Watcher) readEvents() { var ( n uint32 key uintptr ov *windows.Overlapped ) runtime.LockOSThread() for { // This error is handled after the watch == nil check below. qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE) watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { case ch := <-w.quit: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { indexes = append(indexes, index) } w.mu.Unlock() for _, index := range indexes { for _, watch := range index { w.deleteWatch(watch) w.startRead(watch) } } err := windows.CloseHandle(w.port) if err != nil { err = os.NewSyscallError("CloseHandle", err) } close(w.Events) close(w.Errors) ch <- err return case in := <-w.input: switch in.op { case opAddWatch: in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize) case opRemoveWatch: in.reply <- w.remWatch(in.path) } default: } continue } switch qErr { case nil: // No error case windows.ERROR_MORE_DATA: if watch == nil { w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")) } else { // The i/o succeeded but the buffer is full. // In theory we should be building up a full packet. // In practice we can get away with just carrying on. 
n = uint32(unsafe.Sizeof(watch.buf)) } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue case windows.ERROR_OPERATION_ABORTED: // CancelIo was called on this handle continue default: w.sendError(os.NewSyscallError("GetQueuedCompletionPort", qErr)) continue } var offset uint32 for { if n == 0 { w.sendError(ErrEventOverflow) break } // Point "raw" to the event in the buffer raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) // Create a buf that is the size of the path name size := int(raw.FileNameLength / 2) var buf []uint16 // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973 sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) sh.Len = size sh.Cap = size name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: mask = sysFSDELETESELF case windows.FILE_ACTION_MODIFIED: mask = sysFSMODIFY case windows.FILE_ACTION_RENAMED_OLD_NAME: watch.rename = name case windows.FILE_ACTION_RENAMED_NEW_NAME: // Update saved path of all sub-watches. 
old := filepath.Join(watch.path, watch.rename) w.mu.Lock() for _, watchMap := range w.watches { for _, ww := range watchMap { if strings.HasPrefix(ww.path, old) { ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) } } } w.mu.Unlock() if watch.names[watch.rename] != 0 { watch.names[name] |= watch.names[watch.rename] delete(watch.names, watch.rename) mask = sysFSMOVESELF } } sendNameEvent := func() { w.sendEvent(fullname, watch.names[name]&mask) } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { sendNameEvent() } if raw.Action == windows.FILE_ACTION_REMOVED { w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) delete(watch.names, name) } w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { fullname = filepath.Join(watch.path, watch.rename) sendNameEvent() } // Move to the next event in the buffer if raw.NextEntryOffset == 0 { break } offset += raw.NextEntryOffset // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized w.sendError(errors.New( "Windows system assumed buffer larger than it is, events have likely been missed")) break } } if err := w.startRead(watch); err != nil { w.sendError(err) } } } func (w *Watcher) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE } if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME } return m } func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE case windows.FILE_ACTION_REMOVED: return sysFSDELETE case windows.FILE_ACTION_MODIFIED: return sysFSMODIFY case windows.FILE_ACTION_RENAMED_OLD_NAME: return sysFSMOVEDFROM case windows.FILE_ACTION_RENAMED_NEW_NAME: return sysFSMOVEDTO } return 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/remyoudompheng/bigfft/arith_decl.go
vendor/github.com/remyoudompheng/bigfft/arith_decl.go
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bigfft import ( "math/big" _ "unsafe" ) type Word = big.Word //go:linkname addVV math/big.addVV func addVV(z, x, y []Word) (c Word) //go:linkname subVV math/big.subVV func subVV(z, x, y []Word) (c Word) //go:linkname addVW math/big.addVW func addVW(z, x []Word, y Word) (c Word) //go:linkname subVW math/big.subVW func subVW(z, x []Word, y Word) (c Word) //go:linkname shlVU math/big.shlVU func shlVU(z, x []Word, s uint) (c Word) //go:linkname mulAddVWW math/big.mulAddVWW func mulAddVWW(z, x []Word, y, r Word) (c Word) //go:linkname addMulVVW math/big.addMulVVW func addMulVVW(z, x []Word, y Word) (c Word)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/remyoudompheng/bigfft/fft.go
vendor/github.com/remyoudompheng/bigfft/fft.go
// Package bigfft implements multiplication of big.Int using FFT. // // The implementation is based on the Schönhage-Strassen method // using integer FFT modulo 2^n+1. package bigfft import ( "math/big" "unsafe" ) const _W = int(unsafe.Sizeof(big.Word(0)) * 8) type nat []big.Word func (n nat) String() string { v := new(big.Int) v.SetBits(n) return v.String() } // fftThreshold is the size (in words) above which FFT is used over // Karatsuba from math/big. // // TestCalibrate seems to indicate a threshold of 60kbits on 32-bit // arches and 110kbits on 64-bit arches. var fftThreshold = 1800 // Mul computes the product x*y and returns z. // It can be used instead of the Mul method of // *big.Int from math/big package. func Mul(x, y *big.Int) *big.Int { xwords := len(x.Bits()) ywords := len(y.Bits()) if xwords > fftThreshold && ywords > fftThreshold { return mulFFT(x, y) } return new(big.Int).Mul(x, y) } func mulFFT(x, y *big.Int) *big.Int { var xb, yb nat = x.Bits(), y.Bits() zb := fftmul(xb, yb) z := new(big.Int) z.SetBits(zb) if x.Sign()*y.Sign() < 0 { z.Neg(z) } return z } // A FFT size of K=1<<k is adequate when K is about 2*sqrt(N) where // N = x.Bitlen() + y.Bitlen(). func fftmul(x, y nat) nat { k, m := fftSize(x, y) xp := polyFromNat(x, k, m) yp := polyFromNat(y, k, m) rp := xp.Mul(&yp) return rp.Int() } // fftSizeThreshold[i] is the maximal size (in bits) where we should use // fft size i. var fftSizeThreshold = [...]int64{0, 0, 0, 4 << 10, 8 << 10, 16 << 10, // 5 32 << 10, 64 << 10, 1 << 18, 1 << 20, 3 << 20, // 10 8 << 20, 30 << 20, 100 << 20, 300 << 20, 600 << 20, } // returns the FFT length k, m the number of words per chunk // such that m << k is larger than the number of words // in x*y. 
func fftSize(x, y nat) (k uint, m int) { words := len(x) + len(y) bits := int64(words) * int64(_W) k = uint(len(fftSizeThreshold)) for i := range fftSizeThreshold { if fftSizeThreshold[i] > bits { k = uint(i) break } } // The 1<<k chunks of m words must have N bits so that // 2^N-1 is larger than x*y. That is, m<<k > words m = words>>k + 1 return } // valueSize returns the length (in words) to use for polynomial // coefficients, to compute a correct product of polynomials P*Q // where deg(P*Q) < K (== 1<<k) and where coefficients of P and Q are // less than b^m (== 1 << (m*_W)). // The chosen length (in bits) must be a multiple of 1 << (k-extra). func valueSize(k uint, m int, extra uint) int { // The coefficients of P*Q are less than b^(2m)*K // so we need W * valueSize >= 2*m*W+K n := 2*m*_W + int(k) // necessary bits K := 1 << (k - extra) if K < _W { K = _W } n = ((n / K) + 1) * K // round to a multiple of K return n / _W } // poly represents an integer via a polynomial in Z[x]/(x^K+1) // where K is the FFT length and b^m is the computation basis 1<<(m*_W). // If P = a[0] + a[1] x + ... a[n] x^(K-1), the associated natural number // is P(b^m). type poly struct { k uint // k is such that K = 1<<k. m int // the m such that P(b^m) is the original number. a []nat // a slice of at most K m-word coefficients. } // polyFromNat slices the number x into a polynomial // with 1<<k coefficients made of m words. func polyFromNat(x nat, k uint, m int) poly { p := poly{k: k, m: m} length := len(x)/m + 1 p.a = make([]nat, length) for i := range p.a { if len(x) < m { p.a[i] = make(nat, m) copy(p.a[i], x) break } p.a[i] = x[:m] x = x[m:] } return p } // Int evaluates back a poly to its integer value. 
func (p *poly) Int() nat { length := len(p.a)*p.m + 1 if na := len(p.a); na > 0 { length += len(p.a[na-1]) } n := make(nat, length) m := p.m np := n for i := range p.a { l := len(p.a[i]) c := addVV(np[:l], np[:l], p.a[i]) if np[l] < ^big.Word(0) { np[l] += c } else { addVW(np[l:], np[l:], c) } np = np[m:] } n = trim(n) return n } func trim(n nat) nat { for i := range n { if n[len(n)-1-i] != 0 { return n[:len(n)-i] } } return nil } // Mul multiplies p and q modulo X^K-1, where K = 1<<p.k. // The product is done via a Fourier transform. func (p *poly) Mul(q *poly) poly { // extra=2 because: // * some power of 2 is a K-th root of unity when n is a multiple of K/2. // * 2 itself is a square (see fermat.ShiftHalf) n := valueSize(p.k, p.m, 2) pv, qv := p.Transform(n), q.Transform(n) rv := pv.Mul(&qv) r := rv.InvTransform() r.m = p.m return r } // A polValues represents the value of a poly at the powers of a // K-th root of unity θ=2^(l/2) in Z/(b^n+1)Z, where b^n = 2^(K/4*l). type polValues struct { k uint // k is such that K = 1<<k. n int // the length of coefficients, n*_W a multiple of K/4. values []fermat // a slice of K (n+1)-word values } // Transform evaluates p at θ^i for i = 0...K-1, where // θ is a K-th primitive root of unity in Z/(b^n+1)Z. func (p *poly) Transform(n int) polValues { k := p.k inputbits := make([]big.Word, (n+1)<<k) input := make([]fermat, 1<<k) // Now computed q(ω^i) for i = 0 ... K-1 valbits := make([]big.Word, (n+1)<<k) values := make([]fermat, 1<<k) for i := range values { input[i] = inputbits[i*(n+1) : (i+1)*(n+1)] if i < len(p.a) { copy(input[i], p.a[i]) } values[i] = fermat(valbits[i*(n+1) : (i+1)*(n+1)]) } fourier(values, input, false, n, k) return polValues{k, n, values} } // InvTransform reconstructs p (modulo X^K - 1) from its // values at θ^i for i = 0..K-1. func (v *polValues) InvTransform() poly { k, n := v.k, v.n // Perform an inverse Fourier transform to recover p. 
pbits := make([]big.Word, (n+1)<<k) p := make([]fermat, 1<<k) for i := range p { p[i] = fermat(pbits[i*(n+1) : (i+1)*(n+1)]) } fourier(p, v.values, true, n, k) // Divide by K, and untwist q to recover p. u := make(fermat, n+1) a := make([]nat, 1<<k) for i := range p { u.Shift(p[i], -int(k)) copy(p[i], u) a[i] = nat(p[i]) } return poly{k: k, m: 0, a: a} } // NTransform evaluates p at θω^i for i = 0...K-1, where // θ is a (2K)-th primitive root of unity in Z/(b^n+1)Z // and ω = θ². func (p *poly) NTransform(n int) polValues { k := p.k if len(p.a) >= 1<<k { panic("Transform: len(p.a) >= 1<<k") } // θ is represented as a shift. θshift := (n * _W) >> k // p(x) = a_0 + a_1 x + ... + a_{K-1} x^(K-1) // p(θx) = q(x) where // q(x) = a_0 + θa_1 x + ... + θ^(K-1) a_{K-1} x^(K-1) // // Twist p by θ to obtain q. tbits := make([]big.Word, (n+1)<<k) twisted := make([]fermat, 1<<k) src := make(fermat, n+1) for i := range twisted { twisted[i] = fermat(tbits[i*(n+1) : (i+1)*(n+1)]) if i < len(p.a) { for i := range src { src[i] = 0 } copy(src, p.a[i]) twisted[i].Shift(src, θshift*i) } } // Now computed q(ω^i) for i = 0 ... K-1 valbits := make([]big.Word, (n+1)<<k) values := make([]fermat, 1<<k) for i := range values { values[i] = fermat(valbits[i*(n+1) : (i+1)*(n+1)]) } fourier(values, twisted, false, n, k) return polValues{k, n, values} } // InvTransform reconstructs a polynomial from its values at // roots of x^K+1. The m field of the returned polynomial // is unspecified. func (v *polValues) InvNTransform() poly { k := v.k n := v.n θshift := (n * _W) >> k // Perform an inverse Fourier transform to recover q. qbits := make([]big.Word, (n+1)<<k) q := make([]fermat, 1<<k) for i := range q { q[i] = fermat(qbits[i*(n+1) : (i+1)*(n+1)]) } fourier(q, v.values, true, n, k) // Divide by K, and untwist q to recover p. 
u := make(fermat, n+1) a := make([]nat, 1<<k) for i := range q { u.Shift(q[i], -int(k)-i*θshift) copy(q[i], u) a[i] = nat(q[i]) } return poly{k: k, m: 0, a: a} } // fourier performs an unnormalized Fourier transform // of src, a length 1<<k vector of numbers modulo b^n+1 // where b = 1<<_W. func fourier(dst []fermat, src []fermat, backward bool, n int, k uint) { var rec func(dst, src []fermat, size uint) tmp := make(fermat, n+1) // pre-allocate temporary variables. tmp2 := make(fermat, n+1) // pre-allocate temporary variables. // The recursion function of the FFT. // The root of unity used in the transform is ω=1<<(ω2shift/2). // The source array may use shifted indices (i.e. the i-th // element is src[i << idxShift]). rec = func(dst, src []fermat, size uint) { idxShift := k - size ω2shift := (4 * n * _W) >> size if backward { ω2shift = -ω2shift } // Easy cases. if len(src[0]) != n+1 || len(dst[0]) != n+1 { panic("len(src[0]) != n+1 || len(dst[0]) != n+1") } switch size { case 0: copy(dst[0], src[0]) return case 1: dst[0].Add(src[0], src[1<<idxShift]) // dst[0] = src[0] + src[1] dst[1].Sub(src[0], src[1<<idxShift]) // dst[1] = src[0] - src[1] return } // Let P(x) = src[0] + src[1<<idxShift] * x + ... + src[K-1 << idxShift] * x^(K-1) // The P(x) = Q1(x²) + x*Q2(x²) // where Q1's coefficients are src with indices shifted by 1 // where Q2's coefficients are src[1<<idxShift:] with indices shifted by 1 // Split destination vectors in halves. dst1 := dst[:1<<(size-1)] dst2 := dst[1<<(size-1):] // Transform Q1 and Q2 in the halves. rec(dst1, src, size-1) rec(dst2, src[1<<idxShift:], size-1) // Reconstruct P's transform from transforms of Q1 and Q2. // dst[i] is dst1[i] + ω^i * dst2[i] // dst[i + 1<<(k-1)] is dst1[i] + ω^(i+K/2) * dst2[i] // for i := range dst1 { tmp.ShiftHalf(dst2[i], i*ω2shift, tmp2) // ω^i * dst2[i] dst2[i].Sub(dst1[i], tmp) dst1[i].Add(dst1[i], tmp) } } rec(dst, src, k) } // Mul returns the pointwise product of p and q. 
func (p *polValues) Mul(q *polValues) (r polValues) { n := p.n r.k, r.n = p.k, p.n r.values = make([]fermat, len(p.values)) bits := make([]big.Word, len(p.values)*(n+1)) buf := make(fermat, 8*n) for i := range r.values { r.values[i] = bits[i*(n+1) : (i+1)*(n+1)] z := buf.Mul(p.values[i], q.values[i]) copy(r.values[i], z) } return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/remyoudompheng/bigfft/scan.go
vendor/github.com/remyoudompheng/bigfft/scan.go
package bigfft import ( "math/big" ) // FromDecimalString converts the base 10 string // representation of a natural (non-negative) number // into a *big.Int. // Its asymptotic complexity is less than quadratic. func FromDecimalString(s string) *big.Int { var sc scanner z := new(big.Int) sc.scan(z, s) return z } type scanner struct { // powers[i] is 10^(2^i * quadraticScanThreshold). powers []*big.Int } func (s *scanner) chunkSize(size int) (int, *big.Int) { if size <= quadraticScanThreshold { panic("size < quadraticScanThreshold") } pow := uint(0) for n := size; n > quadraticScanThreshold; n /= 2 { pow++ } // threshold * 2^(pow-1) <= size < threshold * 2^pow return quadraticScanThreshold << (pow - 1), s.power(pow - 1) } func (s *scanner) power(k uint) *big.Int { for i := len(s.powers); i <= int(k); i++ { z := new(big.Int) if i == 0 { if quadraticScanThreshold%14 != 0 { panic("quadraticScanThreshold % 14 != 0") } z.Exp(big.NewInt(1e14), big.NewInt(quadraticScanThreshold/14), nil) } else { z.Mul(s.powers[i-1], s.powers[i-1]) } s.powers = append(s.powers, z) } return s.powers[k] } func (s *scanner) scan(z *big.Int, str string) { if len(str) <= quadraticScanThreshold { z.SetString(str, 10) return } sz, pow := s.chunkSize(len(str)) // Scan the left half. s.scan(z, str[:len(str)-sz]) // FIXME: reuse temporaries. left := Mul(z, pow) // Scan the right half s.scan(z, str[len(str)-sz:]) z.Add(z, left) } // quadraticScanThreshold is the number of digits // below which big.Int.SetString is more efficient // than subquadratic algorithms. // 1232 digits fit in 4096 bits. const quadraticScanThreshold = 1232
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/remyoudompheng/bigfft/fermat.go
vendor/github.com/remyoudompheng/bigfft/fermat.go
package bigfft import ( "math/big" ) // Arithmetic modulo 2^n+1. // A fermat of length w+1 represents a number modulo 2^(w*_W) + 1. The last // word is zero or one. A number has at most two representatives satisfying the // 0-1 last word constraint. type fermat nat func (n fermat) String() string { return nat(n).String() } func (z fermat) norm() { n := len(z) - 1 c := z[n] if c == 0 { return } if z[0] >= c { z[n] = 0 z[0] -= c return } // z[0] < z[n]. subVW(z, z, c) // Substract c if c > 1 { z[n] -= c - 1 c = 1 } // Add back c. if z[n] == 1 { z[n] = 0 return } else { addVW(z, z, 1) } } // Shift computes (x << k) mod (2^n+1). func (z fermat) Shift(x fermat, k int) { if len(z) != len(x) { panic("len(z) != len(x) in Shift") } n := len(x) - 1 // Shift by n*_W is taking the opposite. k %= 2 * n * _W if k < 0 { k += 2 * n * _W } neg := false if k >= n*_W { k -= n * _W neg = true } kw, kb := k/_W, k%_W z[n] = 1 // Add (-1) if !neg { for i := 0; i < kw; i++ { z[i] = 0 } // Shift left by kw words. // x = a·2^(n-k) + b // x<<k = (b<<k) - a copy(z[kw:], x[:n-kw]) b := subVV(z[:kw+1], z[:kw+1], x[n-kw:]) if z[kw+1] > 0 { z[kw+1] -= b } else { subVW(z[kw+1:], z[kw+1:], b) } } else { for i := kw + 1; i < n; i++ { z[i] = 0 } // Shift left and negate, by kw words. copy(z[:kw+1], x[n-kw:n+1]) // z_low = x_high b := subVV(z[kw:n], z[kw:n], x[:n-kw]) // z_high -= x_low z[n] -= b } // Add back 1. if z[n] > 0 { z[n]-- } else if z[0] < ^big.Word(0) { z[0]++ } else { addVW(z, z, 1) } // Shift left by kb bits shlVU(z, z, uint(kb)) z.norm() } // ShiftHalf shifts x by k/2 bits the left. Shifting by 1/2 bit // is multiplication by sqrt(2) mod 2^n+1 which is 2^(3n/4) - 2^(n/4). // A temporary buffer must be provided in tmp. func (z fermat) ShiftHalf(x fermat, k int, tmp fermat) { n := len(z) - 1 if k%2 == 0 { z.Shift(x, k/2) return } u := (k - 1) / 2 a := u + (3*_W/4)*n b := u + (_W/4)*n z.Shift(x, a) tmp.Shift(x, b) z.Sub(z, tmp) } // Add computes addition mod 2^n+1. 
func (z fermat) Add(x, y fermat) fermat { if len(z) != len(x) { panic("Add: len(z) != len(x)") } addVV(z, x, y) // there cannot be a carry here. z.norm() return z } // Sub computes substraction mod 2^n+1. func (z fermat) Sub(x, y fermat) fermat { if len(z) != len(x) { panic("Add: len(z) != len(x)") } n := len(y) - 1 b := subVV(z[:n], x[:n], y[:n]) b += y[n] // If b > 0, we need to subtract b<<n, which is the same as adding b. z[n] = x[n] if z[0] <= ^big.Word(0)-b { z[0] += b } else { addVW(z, z, b) } z.norm() return z } func (z fermat) Mul(x, y fermat) fermat { if len(x) != len(y) { panic("Mul: len(x) != len(y)") } n := len(x) - 1 if n < 30 { z = z[:2*n+2] basicMul(z, x, y) z = z[:2*n+1] } else { var xi, yi, zi big.Int xi.SetBits(x) yi.SetBits(y) zi.SetBits(z) zb := zi.Mul(&xi, &yi).Bits() if len(zb) <= n { // Short product. copy(z, zb) for i := len(zb); i < len(z); i++ { z[i] = 0 } return z } z = zb } // len(z) is at most 2n+1. if len(z) > 2*n+1 { panic("len(z) > 2n+1") } // We now have // z = z[:n] + 1<<(n*W) * z[n:2n+1] // which normalizes to: // z = z[:n] - z[n:2n] + z[2n] c1 := big.Word(0) if len(z) > 2*n { c1 = addVW(z[:n], z[:n], z[2*n]) } c2 := big.Word(0) if len(z) >= 2*n { c2 = subVV(z[:n], z[:n], z[n:2*n]) } else { m := len(z) - n c2 = subVV(z[:m], z[:m], z[n:]) c2 = subVW(z[m:n], z[m:n], c2) } // Restore carries. // Substracting z[n] -= c2 is the same // as z[0] += c2 z = z[:n+1] z[n] = c1 c := addVW(z, z, c2) if c != 0 { panic("impossible") } z.norm() return z } // copied from math/big // // basicMul multiplies x and y and leaves the result in z. // The (non-normalized) result is placed in z[0 : len(x) + len(y)]. func basicMul(z, x, y fermat) { // initialize z for i := 0; i < len(z); i++ { z[i] = 0 } for i, d := range y { if d != 0 { z[len(x)+i] = addMulVVW(z[i:i+len(x)], x, d) } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/readers.go
vendor/github.com/ovirt/go-ovirt/readers.go
// Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovirtsdk import ( "encoding/xml" "io" "strconv" ) func XMLAffinityGroupReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*AffinityGroup, error) { builder := NewAffinityGroupBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "affinity_group" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } // Process the attributes for _, attr := range start.Attr { name := attr.Name.Local value := attr.Value switch name { case "id": builder.Id(value) case "href": builder.Href(value) } } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "cluster": v, err := XMLClusterReadOne(reader, &t, "cluster") if err != nil { return nil, err } builder.Cluster(v) case "comment": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Comment(v) case "description": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Description(v) case "enforcing": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Enforcing(v) case "host_labels": v, err := 
XMLAffinityLabelReadMany(reader, &t) if err != nil { return nil, err } builder.HostLabels(v) case "hosts": v, err := XMLHostReadMany(reader, &t) if err != nil { return nil, err } builder.Hosts(v) case "hosts_rule": v, err := XMLAffinityRuleReadOne(reader, &t, "hosts_rule") if err != nil { return nil, err } builder.HostsRule(v) case "name": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Name(v) case "positive": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Positive(v) case "priority": v, err := reader.ReadFloat64(&t) if err != nil { return nil, err } builder.Priority(v) case "vm_labels": v, err := XMLAffinityLabelReadMany(reader, &t) if err != nil { return nil, err } builder.VmLabels(v) case "vms": v, err := XMLVmReadMany(reader, &t) if err != nil { return nil, err } builder.Vms(v) case "vms_rule": v, err := XMLAffinityRuleReadOne(reader, &t, "vms_rule") if err != nil { return nil, err } builder.VmsRule(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { case "hostlabels": if one.hostLabels == nil { one.hostLabels = new(AffinityLabelSlice) } one.hostLabels.href = link.href case "hosts": if one.hosts == nil { one.hosts = new(HostSlice) } one.hosts.href = link.href case "vmlabels": if one.vmLabels == nil { one.vmLabels = new(AffinityLabelSlice) } one.vmLabels.href = link.href case "vms": if one.vms == nil { one.vms = new(VmSlice) } one.vms.href = link.href } // end of switch } // end of for-links return one, nil } func XMLAffinityGroupReadMany(reader *XMLReader, start *xml.StartElement) 
(*AffinityGroupSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AffinityGroupSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "affinity_group": one, err := XMLAffinityGroupReadOne(reader, &t, "affinity_group") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLAffinityLabelReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*AffinityLabel, error) { builder := NewAffinityLabelBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "affinity_label" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } // Process the attributes for _, attr := range start.Attr { name := attr.Name.Local value := attr.Value switch name { case "id": builder.Id(value) case "href": builder.Href(value) } } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "comment": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Comment(v) case "description": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Description(v) case "has_implicit_affinity_group": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.HasImplicitAffinityGroup(v) case "hosts": v, err := XMLHostReadMany(reader, &t) if err != nil { return nil, err } builder.Hosts(v) case "name": v, err := 
reader.ReadString(&t) if err != nil { return nil, err } builder.Name(v) case "read_only": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.ReadOnly(v) case "vms": v, err := XMLVmReadMany(reader, &t) if err != nil { return nil, err } builder.Vms(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { case "hosts": if one.hosts == nil { one.hosts = new(HostSlice) } one.hosts.href = link.href case "vms": if one.vms == nil { one.vms = new(VmSlice) } one.vms.href = link.href } // end of switch } // end of for-links return one, nil } func XMLAffinityLabelReadMany(reader *XMLReader, start *xml.StartElement) (*AffinityLabelSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AffinityLabelSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "affinity_label": one, err := XMLAffinityLabelReadOne(reader, &t, "affinity_label") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLAffinityRuleReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*AffinityRule, error) { builder := NewAffinityRuleBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, 
err } start = st } if expectedTag == "" { expectedTag = "affinity_rule" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "enabled": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Enabled(v) case "enforcing": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Enforcing(v) case "positive": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Positive(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLAffinityRuleReadMany(reader *XMLReader, start *xml.StartElement) (*AffinityRuleSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AffinityRuleSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "affinity_rule": one, err := XMLAffinityRuleReadOne(reader, &t, "affinity_rule") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLAgentReadOne(reader 
*XMLReader, start *xml.StartElement, expectedTag string) (*Agent, error) { builder := NewAgentBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "agent" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } // Process the attributes for _, attr := range start.Attr { name := attr.Name.Local value := attr.Value switch name { case "id": builder.Id(value) case "href": builder.Href(value) } } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "address": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Address(v) case "comment": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Comment(v) case "concurrent": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.Concurrent(v) case "description": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Description(v) case "encrypt_options": v, err := reader.ReadBool(&t) if err != nil { return nil, err } builder.EncryptOptions(v) case "host": v, err := XMLHostReadOne(reader, &t, "host") if err != nil { return nil, err } builder.Host(v) case "name": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Name(v) case "options": v, err := XMLOptionReadMany(reader, &t) if err != nil { return nil, err } builder.Options(v) case "order": v, err := reader.ReadInt64(&t) if err != nil { return nil, err } builder.Order(v) case "password": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Password(v) case "port": v, err := reader.ReadInt64(&t) if err != nil { return nil, err } builder.Port(v) case "type": v, err := reader.ReadString(&t) if err != nil { return 
nil, err } builder.Type(v) case "username": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Username(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLAgentReadMany(reader *XMLReader, start *xml.StartElement) (*AgentSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AgentSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "agent": one, err := XMLAgentReadOne(reader, &t, "agent") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLAgentConfigurationReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*AgentConfiguration, error) { builder := NewAgentConfigurationBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "agent_configuration" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := 
t.(type) { case xml.StartElement: switch t.Name.Local { case "address": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Address(v) case "broker_type": vp, err := XMLMessageBrokerTypeReadOne(reader, &t) v := *vp if err != nil { return nil, err } builder.BrokerType(v) case "network_mappings": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.NetworkMappings(v) case "password": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Password(v) case "port": v, err := reader.ReadInt64(&t) if err != nil { return nil, err } builder.Port(v) case "username": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Username(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLAgentConfigurationReadMany(reader *XMLReader, start *xml.StartElement) (*AgentConfigurationSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AgentConfigurationSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "agent_configuration": one, err := XMLAgentConfigurationReadOne(reader, &t, "agent_configuration") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } 
return &result, nil } func XMLApiReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*Api, error) { builder := NewApiBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "api" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "authenticated_user": v, err := XMLUserReadOne(reader, &t, "authenticated_user") if err != nil { return nil, err } builder.AuthenticatedUser(v) case "effective_user": v, err := XMLUserReadOne(reader, &t, "effective_user") if err != nil { return nil, err } builder.EffectiveUser(v) case "product_info": v, err := XMLProductInfoReadOne(reader, &t, "product_info") if err != nil { return nil, err } builder.ProductInfo(v) case "special_objects": v, err := XMLSpecialObjectsReadOne(reader, &t, "special_objects") if err != nil { return nil, err } builder.SpecialObjects(v) case "summary": v, err := XMLApiSummaryReadOne(reader, &t, "summary") if err != nil { return nil, err } builder.Summary(v) case "time": v, err := reader.ReadTime(&t) if err != nil { return nil, err } builder.Time(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func 
XMLApiReadMany(reader *XMLReader, start *xml.StartElement) (*ApiSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result ApiSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "api": one, err := XMLApiReadOne(reader, &t, "api") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLApiSummaryReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*ApiSummary, error) { builder := NewApiSummaryBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "api_summary" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "hosts": v, err := XMLApiSummaryItemReadOne(reader, &t, "hosts") if err != nil { return nil, err } builder.Hosts(v) case "storage_domains": v, err := XMLApiSummaryItemReadOne(reader, &t, "storage_domains") if err != nil { return nil, err } builder.StorageDomains(v) case "users": v, err := XMLApiSummaryItemReadOne(reader, &t, "users") if err != nil { return nil, err } builder.Users(v) case "vms": v, err := XMLApiSummaryItemReadOne(reader, &t, "vms") if err != nil { return nil, err } builder.Vms(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel 
= value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLApiSummaryReadMany(reader *XMLReader, start *xml.StartElement) (*ApiSummarySlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result ApiSummarySlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "api_summary": one, err := XMLApiSummaryReadOne(reader, &t, "api_summary") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLApiSummaryItemReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*ApiSummaryItem, error) { builder := NewApiSummaryItemBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "api_summary_item" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "active": v, err := reader.ReadInt64(&t) if err != nil { return nil, err } builder.Active(v) case "total": v, err := reader.ReadInt64(&t) if err != nil { return nil, err } builder.Total(v) case "link": var rel, href 
string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLApiSummaryItemReadMany(reader *XMLReader, start *xml.StartElement) (*ApiSummaryItemSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result ApiSummaryItemSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "api_summary_item": one, err := XMLApiSummaryItemReadOne(reader, &t, "api_summary_item") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLApplicationReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*Application, error) { builder := NewApplicationBuilder() if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "application" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } // Process the attributes for _, attr := range start.Attr { name := attr.Name.Local value := attr.Value switch name { case "id": builder.Id(value) case "href": builder.Href(value) } } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } 
return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "comment": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Comment(v) case "description": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Description(v) case "name": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Name(v) case "vm": v, err := XMLVmReadOne(reader, &t, "vm") if err != nil { return nil, err } builder.Vm(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLApplicationReadMany(reader *XMLReader, start *xml.StartElement) (*ApplicationSlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result ApplicationSlice depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "application": one, err := XMLApplicationReadOne(reader, &t, "application") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default: reader.Skip() } case xml.EndElement: depth-- } } return &result, nil } func XMLAuthorizedKeyReadOne(reader *XMLReader, start *xml.StartElement, expectedTag string) (*AuthorizedKey, error) { builder := NewAuthorizedKeyBuilder() if start == nil { st, err := reader.FindStartElement() if err != 
nil { if err == io.EOF { return nil, nil } return nil, err } start = st } if expectedTag == "" { expectedTag = "authorized_key" } if start.Name.Local != expectedTag { return nil, XMLTagNotMatchError{start.Name.Local, expectedTag} } // Process the attributes for _, attr := range start.Attr { name := attr.Name.Local value := attr.Value switch name { case "id": builder.Id(value) case "href": builder.Href(value) } } var links []Link depth := 1 for depth > 0 { t, err := reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "comment": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Comment(v) case "description": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Description(v) case "key": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Key(v) case "name": v, err := reader.ReadString(&t) if err != nil { return nil, err } builder.Name(v) case "user": v, err := XMLUserReadOne(reader, &t, "user") if err != nil { return nil, err } builder.User(v) case "link": var rel, href string for _, attr := range t.Attr { name := attr.Name.Local value := attr.Value switch name { case "href": href = value case "rel": rel = value } } if rel != "" && href != "" { links = append(links, Link{&href, &rel}) } // <link> just has attributes, so must skip manually reader.Skip() default: reader.Skip() } case xml.EndElement: depth-- } } one, err := builder.Build() if err != nil { return nil, err } for _, link := range links { switch *link.rel { } // end of switch } // end of for-links return one, nil } func XMLAuthorizedKeyReadMany(reader *XMLReader, start *xml.StartElement) (*AuthorizedKeySlice, error) { if start == nil { st, err := reader.FindStartElement() if err != nil { if err == io.EOF { return nil, nil } return nil, err } start = st } var result AuthorizedKeySlice depth := 1 for depth > 0 { t, err := 
reader.Next() if err != nil { if err == io.EOF { break } return nil, err } t = xml.CopyToken(t) switch t := t.(type) { case xml.StartElement: switch t.Name.Local { case "authorized_key": one, err := XMLAuthorizedKeyReadOne(reader, &t, "authorized_key") if err != nil { return nil, err } if one != nil { result.slice = append(result.slice, one) } default:
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/helper.go
vendor/github.com/ovirt/go-ovirt/helper.go
package ovirtsdk import ( "fmt" "time" ) const DefaultInterval = 10 * time.Second const DefaultVMTimeout = 120 * time.Second // WaitForVM waits for VM to given status func (c *Connection) WaitForVM(vmID string, status VmStatus, timeout time.Duration) error { if timeout <= 0 { timeout = DefaultVMTimeout } if vmID == "" { return fmt.Errorf("the VM ID must not be empty") } vmService := c.SystemService().VmsService().VmService(vmID) for { resp, err := vmService.Get().Send() if err != nil { return err } if timeout <= 0 { return fmt.Errorf("timeout for waiting for VM to %v", status) } vm, ok := resp.Vm() if !ok { continue } if vm.MustStatus() == status { break } timeout = timeout - DefaultInterval time.Sleep(DefaultInterval) } return nil } const DefaultDiskTimeout = 120 * time.Second func (c *Connection) WaitForDisk(diskID string, status DiskStatus, timeout time.Duration) error { if timeout <= 0 { timeout = DefaultDiskTimeout } if diskID == "" { return fmt.Errorf("the Disk ID must not be empty") } diskService := c.SystemService().DisksService().DiskService(diskID) for { resp, err := diskService.Get().Send() if err != nil { return err } if timeout <= 0 { return fmt.Errorf("timeout for waiting for Disk to %v", status) } disk, ok := resp.Disk() if !ok { continue } if disk.MustStatus() == status { break } timeout = timeout - DefaultInterval time.Sleep(DefaultInterval) } return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/error.go
vendor/github.com/ovirt/go-ovirt/error.go
package ovirtsdk import ( "bytes" "errors" "fmt" "net/http" ) type baseError struct { // Code contains the HTTP status code that caused this error. Code int // Msg contains the text message that should be printed to the user. Msg string } // Error returns the error string. func (b *baseError) Error() string { return b.Msg } // AuthError indicates that an authentication or authorization // problem happened, like incorrect user name, incorrect password, or // missing permissions. type AuthError struct { baseError } // Conflict error indicates that the operation failed because of a conflict. // For example, another operation blocks the operation from being executed. type ConflictError struct { baseError } // NotFoundError indicates that an object can't be found. type NotFoundError struct { baseError } // ResponseParseError indicates that the response from the oVirt Engine could not be parsed. type ResponseParseError struct { baseError cause error body []byte } // Unwrap returns the root cause of this error. func (r *ResponseParseError) Unwrap() error { return r.cause } // Body returns the HTTP response body that caused the parse error. func (r *ResponseParseError) Body() []byte { return r.body } // CheckFault takes a failed HTTP response (non-200) and extracts the fault from it. 
func CheckFault(resBytes []byte, response *http.Response) error { // Process empty response body if len(resBytes) == 0 { return BuildError(response, nil) } reader := NewXMLReader(resBytes) fault, err := XMLFaultReadOne(reader, nil, "") if err != nil { return &ResponseParseError{ baseError{ Code: response.StatusCode, Msg: fmt.Sprintf( "failed to parse oVirt Engine fault response: %s (%v)", resBytes, err, ), }, err, resBytes, } } if fault != nil || response.StatusCode >= 400 { return BuildError(response, fault) } return errors.New("unknown error") } // CheckAction checks if response contains an Action instance func CheckAction(resBytes []byte, response *http.Response) (*Action, error) { // Process empty response body if len(resBytes) == 0 { return nil, BuildError(response, nil) } var tagNotMatchError XMLTagNotMatchError faultreader := NewXMLReader(resBytes) fault, err := XMLFaultReadOne(faultreader, nil, "") if err != nil { // If the tag mismatches, return the err if !errors.As(err, &tagNotMatchError) { return nil, &ResponseParseError{ baseError{ Code: response.StatusCode, Msg: fmt.Sprintf( "failed to parse oVirt Engine response: %s (%v)", resBytes, err, ), }, err, resBytes, } } } if fault != nil { return nil, BuildError(response, fault) } actionreader := NewXMLReader(resBytes) action, err := XMLActionReadOne(actionreader, nil, "") if err != nil { // If the tag mismatches, return the err if errors.As(err, &tagNotMatchError) { return nil, err } } if action != nil { if afault, ok := action.Fault(); ok { return nil, BuildError(response, afault) } return action, nil } return nil, nil } // BuildError constructs error func BuildError(response *http.Response, fault *Fault) error { var buffer bytes.Buffer if fault != nil { if reason, ok := fault.Reason(); ok { if buffer.Len() > 0 { buffer.WriteString(" ") } buffer.WriteString(fmt.Sprintf("Fault reason is \"%s\".", reason)) } if detail, ok := fault.Detail(); ok { if buffer.Len() > 0 { buffer.WriteString(" ") } 
buffer.WriteString(fmt.Sprintf("Fault detail is \"%s\".", detail)) } } if response != nil { if buffer.Len() > 0 { buffer.WriteString(" ") } buffer.WriteString(fmt.Sprintf("HTTP response code is \"%d\".", response.StatusCode)) buffer.WriteString(" ") buffer.WriteString(fmt.Sprintf("HTTP response message is \"%s\".", response.Status)) if Contains(response.StatusCode, []int{401, 403}) { return &AuthError{ baseError{ response.StatusCode, buffer.String(), }, } } else if response.StatusCode == 404 { return &NotFoundError{ baseError{ response.StatusCode, buffer.String(), }, } } else if response.StatusCode == 409 { return &ConflictError{ baseError{ response.StatusCode, buffer.String(), }, } } } return errors.New(buffer.String()) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/types.go
vendor/github.com/ovirt/go-ovirt/types.go
// Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovirtsdk import ( "fmt" "time" ) // AffinityGroup An affinity group represents a group of virtual machines with a defined relationship. type AffinityGroup struct { Struct cluster *Cluster comment *string description *string enforcing *bool hostLabels *AffinityLabelSlice hosts *HostSlice hostsRule *AffinityRule id *string name *string positive *bool priority *float64 vmLabels *AffinityLabelSlice vms *VmSlice vmsRule *AffinityRule } // SetCluster Type representation of a cluster. 
// // A JSON representation of a cluster: // // [source] // ---- // // { // "cluster" : [ { // "ballooning_enabled" : "false", // "cpu" : { // "architecture" : "x86_64", // "type" : "Intel SandyBridge Family" // }, // "custom_scheduling_policy_properties" : { // "property" : [ { // "name" : "HighUtilization", // "value" : "80" // }, { // "name" : "CpuOverCommitDurationMinutes", // "value" : "2" // } ] // }, // "error_handling" : { // "on_error" : "migrate" // }, // "fencing_policy" : { // "enabled" : "true", // "skip_if_connectivity_broken" : { // "enabled" : "false", // "threshold" : "50" // }, // "skip_if_gluster_bricks_up" : "false", // "skip_if_gluster_quorum_not_met" : "false", // "skip_if_sd_active" : { // "enabled" : "false" // } // }, // "gluster_service" : "false", // "firewall_type" : "iptables", // "ha_reservation" : "false", // "ksm" : { // "enabled" : "true", // "merge_across_nodes" : "true" // }, // "memory_policy" : { // "over_commit" : { // "percent" : "100" // }, // "transparent_hugepages" : { // "enabled" : "true" // } // }, // "migration" : { // "auto_converge" : "inherit", // "bandwidth" : { // "assignment_method" : "auto" // }, // "compressed" : "inherit", // "policy" : { // "id" : "00000000-0000-0000-0000-000000000000" // } // }, // "required_rng_sources" : { // "required_rng_source" : [ "random" ] // }, // "switch_type" : "legacy", // "threads_as_cores" : "false", // "trusted_service" : "false", // "tunnel_migration" : "false", // "version" : { // "major" : "4", // "minor" : "1" // }, // "virt_service" : "true", // "data_center" : { // "href" : "/ovirt-engine/api/datacenters/123", // "id" : "123" // }, // "mac_pool" : { // "href" : "/ovirt-engine/api/macpools/456", // "id" : "456" // }, // "scheduling_policy" : { // "href" : "/ovirt-engine/api/schedulingpolicies/789", // "id" : "789" // }, // "actions" : { // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/resetemulatedmachine", // "rel" : "resetemulatedmachine" // } ] // }, // 
"name" : "Default", // "description" : "The default server cluster", // "href" : "/ovirt-engine/api/clusters/234", // "id" : "234", // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/permissions", // "rel" : "permissions" // }, { // "href" : "/ovirt-engine/api/clusters/234/cpuprofiles", // "rel" : "cpuprofiles" // }, { // "href" : "/ovirt-engine/api/clusters/234/networkfilters", // "rel" : "networkfilters" // }, { // "href" : "/ovirt-engine/api/clusters/234/networks", // "rel" : "networks" // }, { // "href" : "/ovirt-engine/api/clusters/234/affinitygroups", // "rel" : "affinitygroups" // }, { // "href" : "/ovirt-engine/api/clusters/234/glusterhooks", // "rel" : "glusterhooks" // }, { // "href" : "/ovirt-engine/api/clusters/234/glustervolumes", // "rel" : "glustervolumes" // }, { // "href" : "/ovirt-engine/api/clusters/234/enabledfeatures", // "rel" : "enabledfeatures" // }, { // "href" : "/ovirt-engine/api/clusters/234/externalnetworkproviders", // "rel" : "externalnetworkproviders" // } ] // } ] // } // // ---- func (p *AffinityGroup) SetCluster(attr *Cluster) { p.cluster = attr } // Cluster Type representation of a cluster. 
// // A JSON representation of a cluster: // // [source] // ---- // // { // "cluster" : [ { // "ballooning_enabled" : "false", // "cpu" : { // "architecture" : "x86_64", // "type" : "Intel SandyBridge Family" // }, // "custom_scheduling_policy_properties" : { // "property" : [ { // "name" : "HighUtilization", // "value" : "80" // }, { // "name" : "CpuOverCommitDurationMinutes", // "value" : "2" // } ] // }, // "error_handling" : { // "on_error" : "migrate" // }, // "fencing_policy" : { // "enabled" : "true", // "skip_if_connectivity_broken" : { // "enabled" : "false", // "threshold" : "50" // }, // "skip_if_gluster_bricks_up" : "false", // "skip_if_gluster_quorum_not_met" : "false", // "skip_if_sd_active" : { // "enabled" : "false" // } // }, // "gluster_service" : "false", // "firewall_type" : "iptables", // "ha_reservation" : "false", // "ksm" : { // "enabled" : "true", // "merge_across_nodes" : "true" // }, // "memory_policy" : { // "over_commit" : { // "percent" : "100" // }, // "transparent_hugepages" : { // "enabled" : "true" // } // }, // "migration" : { // "auto_converge" : "inherit", // "bandwidth" : { // "assignment_method" : "auto" // }, // "compressed" : "inherit", // "policy" : { // "id" : "00000000-0000-0000-0000-000000000000" // } // }, // "required_rng_sources" : { // "required_rng_source" : [ "random" ] // }, // "switch_type" : "legacy", // "threads_as_cores" : "false", // "trusted_service" : "false", // "tunnel_migration" : "false", // "version" : { // "major" : "4", // "minor" : "1" // }, // "virt_service" : "true", // "data_center" : { // "href" : "/ovirt-engine/api/datacenters/123", // "id" : "123" // }, // "mac_pool" : { // "href" : "/ovirt-engine/api/macpools/456", // "id" : "456" // }, // "scheduling_policy" : { // "href" : "/ovirt-engine/api/schedulingpolicies/789", // "id" : "789" // }, // "actions" : { // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/resetemulatedmachine", // "rel" : "resetemulatedmachine" // } ] // }, // 
"name" : "Default", // "description" : "The default server cluster", // "href" : "/ovirt-engine/api/clusters/234", // "id" : "234", // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/permissions", // "rel" : "permissions" // }, { // "href" : "/ovirt-engine/api/clusters/234/cpuprofiles", // "rel" : "cpuprofiles" // }, { // "href" : "/ovirt-engine/api/clusters/234/networkfilters", // "rel" : "networkfilters" // }, { // "href" : "/ovirt-engine/api/clusters/234/networks", // "rel" : "networks" // }, { // "href" : "/ovirt-engine/api/clusters/234/affinitygroups", // "rel" : "affinitygroups" // }, { // "href" : "/ovirt-engine/api/clusters/234/glusterhooks", // "rel" : "glusterhooks" // }, { // "href" : "/ovirt-engine/api/clusters/234/glustervolumes", // "rel" : "glustervolumes" // }, { // "href" : "/ovirt-engine/api/clusters/234/enabledfeatures", // "rel" : "enabledfeatures" // }, { // "href" : "/ovirt-engine/api/clusters/234/externalnetworkproviders", // "rel" : "externalnetworkproviders" // } ] // } ] // } // // ---- func (p *AffinityGroup) Cluster() (*Cluster, bool) { if p.cluster != nil { return p.cluster, true } return nil, false } // MustCluster Type representation of a cluster. 
// // A JSON representation of a cluster: // // [source] // ---- // // { // "cluster" : [ { // "ballooning_enabled" : "false", // "cpu" : { // "architecture" : "x86_64", // "type" : "Intel SandyBridge Family" // }, // "custom_scheduling_policy_properties" : { // "property" : [ { // "name" : "HighUtilization", // "value" : "80" // }, { // "name" : "CpuOverCommitDurationMinutes", // "value" : "2" // } ] // }, // "error_handling" : { // "on_error" : "migrate" // }, // "fencing_policy" : { // "enabled" : "true", // "skip_if_connectivity_broken" : { // "enabled" : "false", // "threshold" : "50" // }, // "skip_if_gluster_bricks_up" : "false", // "skip_if_gluster_quorum_not_met" : "false", // "skip_if_sd_active" : { // "enabled" : "false" // } // }, // "gluster_service" : "false", // "firewall_type" : "iptables", // "ha_reservation" : "false", // "ksm" : { // "enabled" : "true", // "merge_across_nodes" : "true" // }, // "memory_policy" : { // "over_commit" : { // "percent" : "100" // }, // "transparent_hugepages" : { // "enabled" : "true" // } // }, // "migration" : { // "auto_converge" : "inherit", // "bandwidth" : { // "assignment_method" : "auto" // }, // "compressed" : "inherit", // "policy" : { // "id" : "00000000-0000-0000-0000-000000000000" // } // }, // "required_rng_sources" : { // "required_rng_source" : [ "random" ] // }, // "switch_type" : "legacy", // "threads_as_cores" : "false", // "trusted_service" : "false", // "tunnel_migration" : "false", // "version" : { // "major" : "4", // "minor" : "1" // }, // "virt_service" : "true", // "data_center" : { // "href" : "/ovirt-engine/api/datacenters/123", // "id" : "123" // }, // "mac_pool" : { // "href" : "/ovirt-engine/api/macpools/456", // "id" : "456" // }, // "scheduling_policy" : { // "href" : "/ovirt-engine/api/schedulingpolicies/789", // "id" : "789" // }, // "actions" : { // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/resetemulatedmachine", // "rel" : "resetemulatedmachine" // } ] // }, // 
"name" : "Default", // "description" : "The default server cluster", // "href" : "/ovirt-engine/api/clusters/234", // "id" : "234", // "link" : [ { // "href" : "/ovirt-engine/api/clusters/234/permissions", // "rel" : "permissions" // }, { // "href" : "/ovirt-engine/api/clusters/234/cpuprofiles", // "rel" : "cpuprofiles" // }, { // "href" : "/ovirt-engine/api/clusters/234/networkfilters", // "rel" : "networkfilters" // }, { // "href" : "/ovirt-engine/api/clusters/234/networks", // "rel" : "networks" // }, { // "href" : "/ovirt-engine/api/clusters/234/affinitygroups", // "rel" : "affinitygroups" // }, { // "href" : "/ovirt-engine/api/clusters/234/glusterhooks", // "rel" : "glusterhooks" // }, { // "href" : "/ovirt-engine/api/clusters/234/glustervolumes", // "rel" : "glustervolumes" // }, { // "href" : "/ovirt-engine/api/clusters/234/enabledfeatures", // "rel" : "enabledfeatures" // }, { // "href" : "/ovirt-engine/api/clusters/234/externalnetworkproviders", // "rel" : "externalnetworkproviders" // } ] // } ] // } // // ---- func (p *AffinityGroup) MustCluster() *Cluster { if p.cluster == nil { panic("the cluster must not be nil, please use Cluster() function instead") } return p.cluster } func (p *AffinityGroup) SetComment(attr string) { p.comment = &attr } func (p *AffinityGroup) Comment() (string, bool) { if p.comment != nil { return *p.comment, true } var zero string return zero, false } func (p *AffinityGroup) MustComment() string { if p.comment == nil { panic("the comment must not be nil, please use Comment() function instead") } return *p.comment } func (p *AffinityGroup) SetDescription(attr string) { p.description = &attr } func (p *AffinityGroup) Description() (string, bool) { if p.description != nil { return *p.description, true } var zero string return zero, false } func (p *AffinityGroup) MustDescription() string { if p.description == nil { panic("the description must not be nil, please use Description() function instead") } return *p.description } func (p 
*AffinityGroup) SetEnforcing(attr bool) { p.enforcing = &attr } func (p *AffinityGroup) Enforcing() (bool, bool) { if p.enforcing != nil { return *p.enforcing, true } var zero bool return zero, false } func (p *AffinityGroup) MustEnforcing() bool { if p.enforcing == nil { panic("the enforcing must not be nil, please use Enforcing() function instead") } return *p.enforcing } func (p *AffinityGroup) SetHostLabels(attr *AffinityLabelSlice) { p.hostLabels = attr } func (p *AffinityGroup) HostLabels() (*AffinityLabelSlice, bool) { if p.hostLabels != nil { return p.hostLabels, true } return nil, false } func (p *AffinityGroup) MustHostLabels() *AffinityLabelSlice { if p.hostLabels == nil { panic("the hostLabels must not be nil, please use HostLabels() function instead") } return p.hostLabels } func (p *AffinityGroup) SetHosts(attr *HostSlice) { p.hosts = attr } func (p *AffinityGroup) Hosts() (*HostSlice, bool) { if p.hosts != nil { return p.hosts, true } return nil, false } func (p *AffinityGroup) MustHosts() *HostSlice { if p.hosts == nil { panic("the hosts must not be nil, please use Hosts() function instead") } return p.hosts } // SetHostsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. func (p *AffinityGroup) SetHostsRule(attr *AffinityRule) { p.hostsRule = attr } // HostsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. 
func (p *AffinityGroup) HostsRule() (*AffinityRule, bool) { if p.hostsRule != nil { return p.hostsRule, true } return nil, false } // MustHostsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. func (p *AffinityGroup) MustHostsRule() *AffinityRule { if p.hostsRule == nil { panic("the hostsRule must not be nil, please use HostsRule() function instead") } return p.hostsRule } func (p *AffinityGroup) SetId(attr string) { p.id = &attr } func (p *AffinityGroup) Id() (string, bool) { if p.id != nil { return *p.id, true } var zero string return zero, false } func (p *AffinityGroup) MustId() string { if p.id == nil { panic("the id must not be nil, please use Id() function instead") } return *p.id } func (p *AffinityGroup) SetName(attr string) { p.name = &attr } func (p *AffinityGroup) Name() (string, bool) { if p.name != nil { return *p.name, true } var zero string return zero, false } func (p *AffinityGroup) MustName() string { if p.name == nil { panic("the name must not be nil, please use Name() function instead") } return *p.name } func (p *AffinityGroup) SetPositive(attr bool) { p.positive = &attr } func (p *AffinityGroup) Positive() (bool, bool) { if p.positive != nil { return *p.positive, true } var zero bool return zero, false } func (p *AffinityGroup) MustPositive() bool { if p.positive == nil { panic("the positive must not be nil, please use Positive() function instead") } return *p.positive } func (p *AffinityGroup) SetPriority(attr float64) { p.priority = &attr } func (p *AffinityGroup) Priority() (float64, bool) { if p.priority != nil { return *p.priority, true } var zero float64 return zero, false } func (p *AffinityGroup) MustPriority() float64 { if p.priority == nil { panic("the priority must not be nil, please use 
Priority() function instead") } return *p.priority } func (p *AffinityGroup) SetVmLabels(attr *AffinityLabelSlice) { p.vmLabels = attr } func (p *AffinityGroup) VmLabels() (*AffinityLabelSlice, bool) { if p.vmLabels != nil { return p.vmLabels, true } return nil, false } func (p *AffinityGroup) MustVmLabels() *AffinityLabelSlice { if p.vmLabels == nil { panic("the vmLabels must not be nil, please use VmLabels() function instead") } return p.vmLabels } func (p *AffinityGroup) SetVms(attr *VmSlice) { p.vms = attr } func (p *AffinityGroup) Vms() (*VmSlice, bool) { if p.vms != nil { return p.vms, true } return nil, false } func (p *AffinityGroup) MustVms() *VmSlice { if p.vms == nil { panic("the vms must not be nil, please use Vms() function instead") } return p.vms } // SetVmsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. func (p *AffinityGroup) SetVmsRule(attr *AffinityRule) { p.vmsRule = attr } // VmsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. func (p *AffinityGroup) VmsRule() (*AffinityRule, bool) { if p.vmsRule != nil { return p.vmsRule, true } return nil, false } // MustVmsRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. 
func (p *AffinityGroup) MustVmsRule() *AffinityRule { if p.vmsRule == nil { panic("the vmsRule must not be nil, please use VmsRule() function instead") } return p.vmsRule } // AffinityLabel The affinity label can influence virtual machine scheduling. // It is most frequently used to create a sub-cluster from the available hosts. type AffinityLabel struct { Struct comment *string description *string hasImplicitAffinityGroup *bool hosts *HostSlice id *string name *string readOnly *bool vms *VmSlice } func (p *AffinityLabel) SetComment(attr string) { p.comment = &attr } func (p *AffinityLabel) Comment() (string, bool) { if p.comment != nil { return *p.comment, true } var zero string return zero, false } func (p *AffinityLabel) MustComment() string { if p.comment == nil { panic("the comment must not be nil, please use Comment() function instead") } return *p.comment } func (p *AffinityLabel) SetDescription(attr string) { p.description = &attr } func (p *AffinityLabel) Description() (string, bool) { if p.description != nil { return *p.description, true } var zero string return zero, false } func (p *AffinityLabel) MustDescription() string { if p.description == nil { panic("the description must not be nil, please use Description() function instead") } return *p.description } func (p *AffinityLabel) SetHasImplicitAffinityGroup(attr bool) { p.hasImplicitAffinityGroup = &attr } func (p *AffinityLabel) HasImplicitAffinityGroup() (bool, bool) { if p.hasImplicitAffinityGroup != nil { return *p.hasImplicitAffinityGroup, true } var zero bool return zero, false } func (p *AffinityLabel) MustHasImplicitAffinityGroup() bool { if p.hasImplicitAffinityGroup == nil { panic("the hasImplicitAffinityGroup must not be nil, please use HasImplicitAffinityGroup() function instead") } return *p.hasImplicitAffinityGroup } func (p *AffinityLabel) SetHosts(attr *HostSlice) { p.hosts = attr } func (p *AffinityLabel) Hosts() (*HostSlice, bool) { if p.hosts != nil { return p.hosts, true } return 
nil, false } func (p *AffinityLabel) MustHosts() *HostSlice { if p.hosts == nil { panic("the hosts must not be nil, please use Hosts() function instead") } return p.hosts } func (p *AffinityLabel) SetId(attr string) { p.id = &attr } func (p *AffinityLabel) Id() (string, bool) { if p.id != nil { return *p.id, true } var zero string return zero, false } func (p *AffinityLabel) MustId() string { if p.id == nil { panic("the id must not be nil, please use Id() function instead") } return *p.id } func (p *AffinityLabel) SetName(attr string) { p.name = &attr } func (p *AffinityLabel) Name() (string, bool) { if p.name != nil { return *p.name, true } var zero string return zero, false } func (p *AffinityLabel) MustName() string { if p.name == nil { panic("the name must not be nil, please use Name() function instead") } return *p.name } func (p *AffinityLabel) SetReadOnly(attr bool) { p.readOnly = &attr } func (p *AffinityLabel) ReadOnly() (bool, bool) { if p.readOnly != nil { return *p.readOnly, true } var zero bool return zero, false } func (p *AffinityLabel) MustReadOnly() bool { if p.readOnly == nil { panic("the readOnly must not be nil, please use ReadOnly() function instead") } return *p.readOnly } func (p *AffinityLabel) SetVms(attr *VmSlice) { p.vms = attr } func (p *AffinityLabel) Vms() (*VmSlice, bool) { if p.vms != nil { return p.vms, true } return nil, false } func (p *AffinityLabel) MustVms() *VmSlice { if p.vms == nil { panic("the vms must not be nil, please use Vms() function instead") } return p.vms } // AffinityRule Generic rule definition for affinity group. Each supported resource type (virtual machine, host) is controlled // by a separate rule. This allows expressing of rules like: no affinity between defined virtual machines, but hard // affinity between defined virtual machines and virtual hosts. 
type AffinityRule struct { Struct enabled *bool enforcing *bool positive *bool } func (p *AffinityRule) SetEnabled(attr bool) { p.enabled = &attr } func (p *AffinityRule) Enabled() (bool, bool) { if p.enabled != nil { return *p.enabled, true } var zero bool return zero, false } func (p *AffinityRule) MustEnabled() bool { if p.enabled == nil { panic("the enabled must not be nil, please use Enabled() function instead") } return *p.enabled } func (p *AffinityRule) SetEnforcing(attr bool) { p.enforcing = &attr } func (p *AffinityRule) Enforcing() (bool, bool) { if p.enforcing != nil { return *p.enforcing, true } var zero bool return zero, false } func (p *AffinityRule) MustEnforcing() bool { if p.enforcing == nil { panic("the enforcing must not be nil, please use Enforcing() function instead") } return *p.enforcing } func (p *AffinityRule) SetPositive(attr bool) { p.positive = &attr } func (p *AffinityRule) Positive() (bool, bool) { if p.positive != nil { return *p.positive, true } var zero bool return zero, false } func (p *AffinityRule) MustPositive() bool { if p.positive == nil { panic("the positive must not be nil, please use Positive() function instead") } return *p.positive } // Agent Type representing a fence agent. 
type Agent struct { Struct address *string comment *string concurrent *bool description *string encryptOptions *bool host *Host id *string name *string options *OptionSlice order *int64 password *string port *int64 type_ *string username *string } func (p *Agent) SetAddress(attr string) { p.address = &attr } func (p *Agent) Address() (string, bool) { if p.address != nil { return *p.address, true } var zero string return zero, false } func (p *Agent) MustAddress() string { if p.address == nil { panic("the address must not be nil, please use Address() function instead") } return *p.address } func (p *Agent) SetComment(attr string) { p.comment = &attr } func (p *Agent) Comment() (string, bool) { if p.comment != nil { return *p.comment, true } var zero string return zero, false } func (p *Agent) MustComment() string { if p.comment == nil { panic("the comment must not be nil, please use Comment() function instead") } return *p.comment } func (p *Agent) SetConcurrent(attr bool) { p.concurrent = &attr } func (p *Agent) Concurrent() (bool, bool) { if p.concurrent != nil { return *p.concurrent, true } var zero bool return zero, false } func (p *Agent) MustConcurrent() bool { if p.concurrent == nil { panic("the concurrent must not be nil, please use Concurrent() function instead") } return *p.concurrent } func (p *Agent) SetDescription(attr string) { p.description = &attr } func (p *Agent) Description() (string, bool) { if p.description != nil { return *p.description, true } var zero string return zero, false } func (p *Agent) MustDescription() string { if p.description == nil { panic("the description must not be nil, please use Description() function instead") } return *p.description } func (p *Agent) SetEncryptOptions(attr bool) { p.encryptOptions = &attr } func (p *Agent) EncryptOptions() (bool, bool) { if p.encryptOptions != nil { return *p.encryptOptions, true } var zero bool return zero, false } func (p *Agent) MustEncryptOptions() bool { if p.encryptOptions == nil { 
panic("the encryptOptions must not be nil, please use EncryptOptions() function instead") } return *p.encryptOptions } // SetHost Type representing a host. func (p *Agent) SetHost(attr *Host) { p.host = attr } // Host Type representing a host. func (p *Agent) Host() (*Host, bool) { if p.host != nil { return p.host, true } return nil, false } // MustHost Type representing a host. func (p *Agent) MustHost() *Host { if p.host == nil { panic("the host must not be nil, please use Host() function instead") } return p.host } func (p *Agent) SetId(attr string) { p.id = &attr } func (p *Agent) Id() (string, bool) { if p.id != nil { return *p.id, true } var zero string return zero, false } func (p *Agent) MustId() string { if p.id == nil { panic("the id must not be nil, please use Id() function instead") } return *p.id } func (p *Agent) SetName(attr string) { p.name = &attr } func (p *Agent) Name() (string, bool) { if p.name != nil { return *p.name, true } var zero string return zero, false } func (p *Agent) MustName() string { if p.name == nil { panic("the name must not be nil, please use Name() function instead") } return *p.name } func (p *Agent) SetOptions(attr *OptionSlice) { p.options = attr } func (p *Agent) Options() (*OptionSlice, bool) { if p.options != nil { return p.options, true } return nil, false } func (p *Agent) MustOptions() *OptionSlice { if p.options == nil { panic("the options must not be nil, please use Options() function instead") } return p.options } func (p *Agent) SetOrder(attr int64) { p.order = &attr } func (p *Agent) Order() (int64, bool) { if p.order != nil { return *p.order, true } var zero int64 return zero, false } func (p *Agent) MustOrder() int64 { if p.order == nil { panic("the order must not be nil, please use Order() function instead") } return *p.order } func (p *Agent) SetPassword(attr string) { p.password = &attr } func (p *Agent) Password() (string, bool) { if p.password != nil { return *p.password, true } var zero string return 
zero, false } func (p *Agent) MustPassword() string { if p.password == nil { panic("the password must not be nil, please use Password() function instead") } return *p.password } func (p *Agent) SetPort(attr int64) { p.port = &attr } func (p *Agent) Port() (int64, bool) { if p.port != nil { return *p.port, true } var zero int64 return zero, false } func (p *Agent) MustPort() int64 { if p.port == nil { panic("the port must not be nil, please use Port() function instead") } return *p.port } func (p *Agent) SetType(attr string) { p.type_ = &attr } func (p *Agent) Type() (string, bool) { if p.type_ != nil { return *p.type_, true } var zero string return zero, false } func (p *Agent) MustType() string { if p.type_ == nil { panic("the type_ must not be nil, please use Type() function instead") } return *p.type_ } func (p *Agent) SetUsername(attr string) { p.username = &attr } func (p *Agent) Username() (string, bool) { if p.username != nil { return *p.username, true } var zero string return zero, false } func (p *Agent) MustUsername() string { if p.username == nil { panic("the username must not be nil, please use Username() function instead") } return *p.username } // AgentConfiguration Deprecated Agent configuration settings. // // Ignored, because the deployment of OpenStack Neutron agent is dropped since {product-name} 4.4.0. // The deployment of OpenStack hosts can be done by Red Hat OpenStack Platform Director or TripleO. 
type AgentConfiguration struct { Struct address *string brokerType *MessageBrokerType networkMappings *string password *string port *int64 username *string } func (p *AgentConfiguration) SetAddress(attr string) { p.address = &attr } func (p *AgentConfiguration) Address() (string, bool) { if p.address != nil { return *p.address, true } var zero string return zero, false } func (p *AgentConfiguration) MustAddress() string { if p.address == nil { panic("the address must not be nil, please use Address() function instead") } return *p.address } // SetBrokerType Deprecated Message Broker type. // // Ignored, because the deployment of OpenStack Neutron agent is dropped since {product-name} 4.4.0. func (p *AgentConfiguration) SetBrokerType(attr MessageBrokerType) { p.brokerType = &attr } // BrokerType Deprecated Message Broker type. // // Ignored, because the deployment of OpenStack Neutron agent is dropped since {product-name} 4.4.0. func (p *AgentConfiguration) BrokerType() (MessageBrokerType, bool) { if p.brokerType != nil { return *p.brokerType, true } var zero MessageBrokerType return zero, false } // MustBrokerType Deprecated Message Broker type. // // Ignored, because the deployment of OpenStack Neutron agent is dropped since {product-name} 4.4.0. 
func (p *AgentConfiguration) MustBrokerType() MessageBrokerType { if p.brokerType == nil { panic("the brokerType must not be nil, please use BrokerType() function instead") } return *p.brokerType } func (p *AgentConfiguration) SetNetworkMappings(attr string) { p.networkMappings = &attr } func (p *AgentConfiguration) NetworkMappings() (string, bool) { if p.networkMappings != nil { return *p.networkMappings, true } var zero string return zero, false } func (p *AgentConfiguration) MustNetworkMappings() string { if p.networkMappings == nil { panic("the networkMappings must not be nil, please use NetworkMappings() function instead") } return *p.networkMappings } func (p *AgentConfiguration) SetPassword(attr string) { p.password = &attr } func (p *AgentConfiguration) Password() (string, bool) { if p.password != nil { return *p.password, true } var zero string return zero, false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/service.go
vendor/github.com/ovirt/go-ovirt/service.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ovirtsdk // Service is the interface of all type services. type Service interface { Connection() *Connection Path() string } // BaseService represents the base for all the services of the SDK. It contains the // utility methods used by all of them. type BaseService struct { connection *Connection path string } func (service *BaseService) Connection() *Connection { return service.connection } func (service *BaseService) Path() string { return service.path }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/utils.go
vendor/github.com/ovirt/go-ovirt/utils.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ovirtsdk import ( "reflect" ) // Contains returns if target contains the obj parameter func Contains(obj interface{}, target interface{}) bool { targetValue := reflect.ValueOf(target) switch reflect.TypeOf(target).Kind() { case reflect.Slice, reflect.Array: for i := 0; i < targetValue.Len(); i++ { if targetValue.Index(i).Interface() == obj { return true } } case reflect.Map: if targetValue.MapIndex(reflect.ValueOf(obj)).IsValid() { return true } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/connection.go
vendor/github.com/ovirt/go-ovirt/connection.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Some codes of this file is from https://github.com/CpuID/ovirt-engine-sdk-go/blob/master/sdk/http/http.go. // And I made some bug fixes, Thanks to @CpuID package ovirtsdk import ( "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" "io/ioutil" "net/http" "net/url" "os" "reflect" "strings" "time" ) // LogFunc represents a flexiable and injectable logger function which fits to most of logger libraries type LogFunc func(format string, v ...interface{}) // Connection represents an HTTP connection to the engine server. // It is intended as the entry point for the SDK, and it provides access to the `system` service and, from there, // to the rest of the services provided by the API. type Connection struct { url *url.URL username string password string token string insecure bool tlsConfig *tls.Config certPool *x509.CertPool caFile string caCert []byte headers map[string]string proxy *url.URL proxyFromEnvironment bool // Debug options logFunc LogFunc kerberos bool timeout time.Duration compress bool // http client client *http.Client // SSO attributes ssoToken string ssoTokenName string } // URL returns the base URL of this connection. func (c *Connection) URL() string { return c.url.String() } // Test tests the connectivity with the server using the system service. 
func (c *Connection) Test() error { _, err := c.SystemService().Get().Send() if err != nil { return fmt.Errorf("failed to validate the connection (%w)", err) } return nil } func (c *Connection) getHref(object Href) (string, bool) { return object.Href() } // IsLink indicates if the given object is a link. // An object is a link if it has an `href` attribute. func (c *Connection) IsLink(object Href) bool { _, ok := c.getHref(object) return ok } // FollowLink follows the `href` attribute of the given object, retrieves the target object and returns it. func (c *Connection) FollowLink(object Href) (interface{}, error) { if !c.IsLink(object) { return nil, errors.New("Can't follow link because object don't have any") } href, ok := c.getHref(object) if !ok { return nil, errors.New("Can't follow link because the 'href' attribute does't have a value") } useURL, err := url.Parse(c.URL()) if err != nil { return nil, errors.New("Failed to parse connection url") } prefix := useURL.Path if !strings.HasSuffix(prefix, "/") { prefix = prefix + "/" } if !strings.HasPrefix(href, prefix) { return nil, fmt.Errorf("the URL '%v' isn't compatible with the base URL of the connection", href) } path := href[len(prefix):] service, err := NewSystemService(c, "").Service(path) if err != nil { return nil, err } serviceValue := reflect.ValueOf(service) // `object` is ptr, so use Elem() to get struct value hrefObjectValue := reflect.ValueOf(object).Elem() var requestCaller reflect.Value // If it's TypeStructSlice (list) if hrefObjectValue.FieldByName("slice").IsValid() { // Call List() method requestCaller = serviceValue.MethodByName("List").Call([]reflect.Value{})[0] } else { requestCaller = serviceValue.MethodByName("Get").Call([]reflect.Value{})[0] } callerResponse := requestCaller.MethodByName("Send").Call([]reflect.Value{})[0] if callerResponse.IsNil() { return nil, errors.New("Could not get response") } // Get the method index, which is not the Must version methodIndex := 0 callerResponseType 
:= callerResponse.Type() for i := 0; i < callerResponseType.NumMethod(); i++ { if strings.HasPrefix(callerResponseType.Method(i).Name, "Must") { methodIndex = i break } } methodIndex = 1 - methodIndex // Retrieve the data returnedValues := callerResponse.Method(methodIndex).Call([]reflect.Value{}) result, ok := returnedValues[0].Interface(), returnedValues[1].Bool() if !ok { return nil, errors.New("The data retrieved not exists") } return result, nil } // authenticate uses OAuth to do authentication func (c *Connection) authenticate() (string, error) { if c.ssoToken == "" { token, err := c.getAccessToken() if err != nil { return "", err } c.ssoToken = token } return c.ssoToken, nil } // Close releases the resources used by this connection. func (c *Connection) Close() error { return c.CloseIfRevokeSSOToken(true) } // CloseIfRevokeSSOToken releases the resources used by this connection. // logout parameter specifies if token should be revoked, and so user should be logged out. func (c *Connection) CloseIfRevokeSSOToken(logout bool) error { if logout { return c.revokeAccessToken() } return nil } // getAccessToken obtains the access token from SSO to be used for bearer authentication. func (c *Connection) getAccessToken() (string, error) { if c.ssoToken == "" { // Build the URL and parameters required for the request: url, parameters := c.buildSsoAuthRequest() // Send the response and wait for the request: ssoResp, err := c.getSsoResponse(url, parameters) if err != nil { return "", err } // Top level array already handled in getSsoResponse() generically. if ssoResp.SsoError != "" { return "", &AuthError{ baseError{ Msg: fmt.Sprintf("Error during SSO authentication %s : %s", ssoResp.SsoErrorCode, ssoResp.SsoError), }, } } c.ssoToken = ssoResp.AccessToken } return c.ssoToken, nil } // Revoke the SSO access token. 
func (c *Connection) revokeAccessToken() error { // Build the URL and parameters required for the request: url, parameters := c.buildSsoRevokeRequest() // Send the response and wait for the request: ssoResp, err := c.getSsoResponse(url, parameters) if err != nil { return err } if ssoResp.SsoError != "" { return &AuthError{ baseError: baseError{ Msg: fmt.Sprintf("Error during SSO token revoke %s : %s", ssoResp.SsoErrorCode, ssoResp.SsoError), }, } } return nil } type ssoResponseJSONParent struct { children []ssoResponseJSON } type ssoResponseJSON struct { AccessToken string `json:"access_token"` SsoError string `json:"error"` SsoErrorCode string `json:"error_code"` } // Execute a get request to the SSO server and return the response. func (c *Connection) getSsoResponse(inputURL *url.URL, parameters map[string]string) (*ssoResponseJSON, error) { // POST request body: formValues := make(url.Values) for k1, v1 := range parameters { formValues[k1] = []string{v1} } // Build the net/http request: req, err := http.NewRequest("POST", inputURL.String(), strings.NewReader(formValues.Encode())) if err != nil { return nil, err } // Add request headers: req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") req.Header.Add("Accept", "application/json") // Send the request and wait for the response: resp, err := c.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() // Parse and return the JSON response: body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if resp.StatusCode == 401 { // Don't bother decoding, this will be a HTML message return nil, &AuthError{ baseError: baseError{ Msg: fmt.Sprintf("authentication failed (response was: %v)", string(body)), }, } } var jsonObj ssoResponseJSON err = json.Unmarshal(body, &jsonObj) if err != nil { return nil, fmt.Errorf("failed to parse non-array sso with response %v (%w)", string(body), err) } // Unmarshal 
successfully if jsonObj.AccessToken != "" || jsonObj.SsoError != "" || jsonObj.SsoErrorCode != "" { return &jsonObj, nil } // Maybe it's array encapsulated, try the other approach. var jsonObjList ssoResponseJSONParent err = json.Unmarshal(body, &jsonObjList) if err != nil { return nil, fmt.Errorf("failed to parse array sso with response %v (%w)", string(body), err) } if len(jsonObjList.children) > 0 { jsonObj.AccessToken = jsonObjList.children[0].AccessToken jsonObj.SsoError = jsonObjList.children[0].SsoError } // Maybe it's revoke access token response, which is empty return &jsonObj, nil } // buildSsoAuthRequest builds a the URL and parameters to acquire the access token from SSO. func (c *Connection) buildSsoAuthRequest() (*url.URL, map[string]string) { // Compute the entry point and the parameters: parameters := map[string]string{ "scope": "ovirt-app-api", } var entryPoint string if c.kerberos { entryPoint = "token-http-auth" parameters["grant_type"] = "urn:ovirt:params:oauth:grant-type:http" } else { entryPoint = "token" parameters["grant_type"] = "password" parameters["username"] = c.username parameters["password"] = c.password } // Compute the URL: var ssoURL url.URL = *c.url ssoURL.Path = fmt.Sprintf("/ovirt-engine/sso/oauth/%s", entryPoint) // Return the URL and the parameters: return &ssoURL, parameters } // buildSsoRevokeRequest builds a the URL and parameters to revoke the SSO access token. // string = the URL of the SSO service // map = hash containing the parameters required to perform the revoke func (c *Connection) buildSsoRevokeRequest() (*url.URL, map[string]string) { // Compute the parameters: parameters := map[string]string{ "scope": "", "token": c.token, } // Compute the URL: var ssoRevokeURL url.URL = *c.url ssoRevokeURL.Path = "/ovirt-engine/services/sso-logout" // Return the URL and the parameters: return &ssoRevokeURL, parameters } // SystemService returns a reference to the root of the services tree. 
func (c *Connection) SystemService() *SystemService { return NewSystemService(c, "") } // NewConnectionBuilder creates the `ConnectionBuilder struct instance func NewConnectionBuilder() *ConnectionBuilder { return &ConnectionBuilder{ conn: &Connection{ ssoTokenName: "access_token"}, err: nil} } // ConnectionBuilder represents a builder for the `Connection` struct type ConnectionBuilder struct { conn *Connection err error } // URL sets the url field for `Connection` instance func (connBuilder *ConnectionBuilder) URL(urlStr string) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } // Save the URL: useURL, err := url.Parse(urlStr) if err != nil { connBuilder.err = err return connBuilder } connBuilder.conn.url = useURL return connBuilder } // Username sets the username field for `Connection` instance func (connBuilder *ConnectionBuilder) Username(username string) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.username = username return connBuilder } // Password sets the password field for `Connection` instance func (connBuilder *ConnectionBuilder) Password(password string) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.password = password return connBuilder } // Insecure sets the insecure field for `Connection` instance func (connBuilder *ConnectionBuilder) Insecure(insecure bool) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.insecure = insecure return connBuilder } // LogFunc sets the logging function field for `Connection` instance func (connBuilder *ConnectionBuilder) LogFunc(logFunc LogFunc) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.logFunc = logFunc return connBuilder } // Timeout sets the 
timeout field for `Connection` instance func (connBuilder *ConnectionBuilder) Timeout(timeout time.Duration) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.timeout = timeout return connBuilder } // CAFile sets the caFile field for `Connection` instance func (connBuilder *ConnectionBuilder) CAFile(caFilePath string) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.caFile = caFilePath return connBuilder } // TLSConfig sets a custom TLS configuration for the connection. This overrides any CA certificates that may have been // passed. func (connBuilder *ConnectionBuilder) TLSConfig(tlsConfig *tls.Config) *ConnectionBuilder { if connBuilder.err != nil { return connBuilder } connBuilder.conn.tlsConfig = tlsConfig return connBuilder } // CertPool sets the base certificate pool for the connection. func (connBuilder *ConnectionBuilder) CertPool(certPool *x509.CertPool) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.certPool = certPool return connBuilder } // CACert sets the caCert field for `Connection` instance func (connBuilder *ConnectionBuilder) CACert(caCert []byte) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.caCert = caCert return connBuilder } // Headers sets a map of custom HTTP headers to be added to each HTTP request func (connBuilder *ConnectionBuilder) Headers(headers map[string]string) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } if connBuilder.conn.headers == nil { connBuilder.conn.headers = map[string]string{} } for hk, hv := range headers { connBuilder.conn.headers[hk] = hv } return connBuilder } // Kerberos sets the kerberos field for `Connection` instance func (connBuilder 
*ConnectionBuilder) Kerberos(kerbros bool) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } // TODO: kerbros==true is not implemented if kerbros == true { connBuilder.err = errors.New("Kerberos is not currently implemented") return connBuilder } connBuilder.conn.kerberos = kerbros return connBuilder } // Compress sets the compress field for `Connection` instance func (connBuilder *ConnectionBuilder) Compress(compress bool) *ConnectionBuilder { // If already has errors, just return if connBuilder.err != nil { return connBuilder } connBuilder.conn.compress = compress return connBuilder } // Proxy sets the proxy server to the specified value. func (connBuilder *ConnectionBuilder) Proxy(proxy *url.URL) *ConnectionBuilder { connBuilder.conn.proxy = proxy return connBuilder } // ProxyFromEnvironment sets the proxy to come from the environment variables. func (connBuilder *ConnectionBuilder) ProxyFromEnvironment() *ConnectionBuilder { connBuilder.conn.proxyFromEnvironment = true return connBuilder } // Build constructs the `Connection` instance func (connBuilder *ConnectionBuilder) Build() (*Connection, error) { // If already has errors, just return if connBuilder.err != nil { return nil, connBuilder.err } // Check parameters if connBuilder.conn.url == nil { return nil, errors.New("the URL must not be empty") } if len(connBuilder.conn.username) == 0 { return nil, errors.New("the username must not be empty") } if len(connBuilder.conn.password) == 0 { return nil, errors.New("the password must not be empty") } if connBuilder.conn.url.Scheme == "https" { if connBuilder.conn.tlsConfig == nil { connBuilder.conn.tlsConfig = &tls.Config{ InsecureSkipVerify: connBuilder.conn.insecure, } if !connBuilder.conn.insecure { certPool := connBuilder.conn.certPool var err error if certPool == nil { certPool, err = x509.SystemCertPool() if err != nil { // This happens when the system cert pool is not available. 
// This is the case on Windows, see https://github.com/golang/go/issues/16736 certPool = x509.NewCertPool() } } var caCerts []byte if len(connBuilder.conn.caFile) > 0 { // Check if the CA File specified exists. if _, err := os.Stat(connBuilder.conn.caFile); os.IsNotExist(err) { return nil, fmt.Errorf("failed to check the CA file '%s' (%w)", connBuilder.conn.caFile, err) } caCerts, err = ioutil.ReadFile(connBuilder.conn.caFile) if err != nil { return nil, err } } else { caCerts = connBuilder.conn.caCert } if len(caCerts) > 0 && !certPool.AppendCertsFromPEM(caCerts) { return nil, fmt.Errorf("failed to parse CA certificate(s)") } connBuilder.conn.tlsConfig.RootCAs = certPool } } } var proxy func(r *http.Request) (*url.URL, error) if connBuilder.conn.proxyFromEnvironment { proxy = http.ProxyFromEnvironment } else if connBuilder.conn.proxy != nil { proxy = func(r *http.Request) (*url.URL, error) { return connBuilder.conn.proxy, nil } } connBuilder.conn.client = &http.Client{ Timeout: connBuilder.conn.timeout, Transport: &http.Transport{ // Close the http connection after calling resp.Body.Close() DisableKeepAlives: true, DisableCompression: !connBuilder.conn.compress, TLSClientConfig: connBuilder.conn.tlsConfig, Proxy: proxy, }, } return connBuilder.conn, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/writer.go
vendor/github.com/ovirt/go-ovirt/writer.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ovirtsdk import ( "bufio" "fmt" "io" "strconv" "time" "unicode/utf8" ) var ( esc_quot = []byte("&#34;") // shorter than "&quot;" esc_apos = []byte("&#39;") // shorter than "&apos;" esc_amp = []byte("&amp;") esc_lt = []byte("&lt;") esc_gt = []byte("&gt;") esc_tab = []byte("&#x9;") esc_nl = []byte("&#xA;") esc_cr = []byte("&#xD;") esc_fffd = []byte("\uFFFD") // Unicode replacement character ) // XMLWriter marshalizes the struct to XML type XMLWriter struct { *bufio.Writer } // NewXMLWriter creates a XMLWriter instance func NewXMLWriter(w io.Writer) *XMLWriter { return &XMLWriter{ Writer: bufio.NewWriter(w), } } func (writer *XMLWriter) WriteElement(uri, name, value string, attrs map[string]string) error { if name == "" { return fmt.Errorf("xml: start tag with no name") } writer.WriteStart(uri, name, attrs) writer.WriteString(value) writer.WriteEnd(name) return nil } func (writer *XMLWriter) WriteStart(uri, name string, attrs map[string]string) error { if name == "" { return fmt.Errorf("xml: start tag with no name") } writer.WriteByte('<') writer.WriteString(name) if uri != "" { writer.WriteString(` xmlns="`) writer.EscapeString(uri) writer.WriteByte('"') } if attrs != nil && len(attrs) > 0 { for attrName, attrValue := range attrs { writer.WriteByte(' ') writer.WriteString(attrName) writer.WriteString(`="`) writer.EscapeString(attrValue) writer.WriteByte('"') } 
} writer.WriteByte('>') return nil } func (writer *XMLWriter) WriteEnd(name string) error { if name == "" { return fmt.Errorf("xml: end tag with no name") } writer.WriteByte('<') writer.WriteByte('/') writer.WriteString(name) writer.WriteByte('>') return nil } func (writer *XMLWriter) WriteCharacter(name, s string) error { return writer.WriteElement("", name, s, nil) } func (writer *XMLWriter) WriteCharacters(name string, ss []string) error { for _, s := range ss { err := writer.WriteCharacter(name, s) if err != nil { return err } } return nil } func (writer *XMLWriter) WriteBool(name string, b bool) error { return writer.WriteElement("", name, writer.FormatBool(b), nil) } func (writer *XMLWriter) WriteBools(name string, bs []bool) error { for _, b := range bs { err := writer.WriteBool(name, b) if err != nil { return err } } return nil } func (writer *XMLWriter) FormatBool(b bool) string { return strconv.FormatBool(b) } func (writer *XMLWriter) WriteInt64(name string, i int64) error { return writer.WriteElement("", name, writer.FormatInt64(i), nil) } func (writer *XMLWriter) WriteInt64s(name string, is []int64) error { for _, i := range is { err := writer.WriteInt64(name, i) if err != nil { return err } } return nil } func (writer *XMLWriter) FormatInt64(i int64) string { return strconv.FormatInt(i, 10) } func (writer *XMLWriter) WriteFloat64(name string, f float64) error { return writer.WriteElement("", name, writer.FormatFloat64(f), nil) } func (writer *XMLWriter) WriteFloat64s(name string, fs []float64) error { for _, f := range fs { err := writer.WriteFloat64(name, f) if err != nil { return err } } return nil } func (writer *XMLWriter) FormatFloat64(f float64) string { return strconv.FormatFloat(f, 'e', 3, 64) } func (writer *XMLWriter) WriteDate(name string, t time.Time) error { return writer.WriteElement("", name, writer.FormatDate(t), nil) } func (writer *XMLWriter) WriteDates(name string, ts []time.Time) error { for _, t := range ts { err := 
writer.WriteDate(name, t) if err != nil { return err } } return nil } func (writer *XMLWriter) FormatDate(t time.Time) string { return t.Format(time.RFC3339Nano) } func (writer *XMLWriter) EscapeString(s string) { var esc []byte last := 0 for i := 0; i < len(s); { r, width := utf8.DecodeRuneInString(s[i:]) i += width switch r { case '"': esc = esc_quot case '\'': esc = esc_apos case '&': esc = esc_amp case '<': esc = esc_lt case '>': esc = esc_gt case '\t': esc = esc_tab case '\n': esc = esc_nl case '\r': esc = esc_cr default: if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { esc = esc_fffd break } continue } writer.WriteString(s[last : i-width]) writer.Write(esc) last = i } writer.WriteString(s[last:]) } // Decide whether the given rune is in the XML Character Range, per // the Char production of http://www.xml.com/axml/testaxml.htm, // Section 2.2 Characters. func isInCharacterRange(r rune) (inrange bool) { return r == 0x09 || r == 0x0A || r == 0x0D || r >= 0x20 && r <= 0xDF77 || r >= 0xE000 && r <= 0xFFFD || r >= 0x10000 && r <= 0x10FFFF }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/reader.go
vendor/github.com/ovirt/go-ovirt/reader.go
//
// Copyright (c) 2017 Joey <majunjiev@gmail.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ovirtsdk

import (
	"bytes"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"strconv"
	"time"
)

// XMLTagNotMatchError indicates the error of XML tag
// not matched when unmarshaling XML.
type XMLTagNotMatchError struct {
	ActualTag   string
	ExpectedTag string
}

// Error formats the mismatch as "expected <x> but got <y>".
func (err XMLTagNotMatchError) Error() string {
	return fmt.Sprintf("Tag not matched: expect <%v> but got <%v>", err.ExpectedTag, err.ActualTag)
}

// CanForward reports whether decoding may keep advancing past tok:
// false only for an end element, true for a start element or any
// other token kind.
func CanForward(tok xml.Token) (bool, error) {
	switch tok.(type) {
	case xml.StartElement:
		return true, nil
	case xml.EndElement:
		return false, nil
	default:
		return true, nil
	}
}

// XMLReader unmarshalizes the xml to struct.
type XMLReader struct {
	*xml.Decoder
}

// NewXMLReader creates a XMLReader instance decoding from b.
func NewXMLReader(b []byte) *XMLReader {
	return &XMLReader{
		Decoder: xml.NewDecoder(bytes.NewReader(b)),
	}
}

// FindStartElement advances the decoder until the next xml.StartElement
// and returns a copy of it. Any decoder error (including io.EOF) stops
// the scan, and the generic "not found" error is returned.
func (reader *XMLReader) FindStartElement() (*xml.StartElement, error) {
	// Find start element if we need it.
	for {
		tok, err := reader.Next()
		if err != nil {
			// BUG FIX: previously the error was printed to stdout via
			// fmt.Printf; a library must not write to stdout. The loop
			// still terminates and falls through to the error below.
			break
		}
		tok = xml.CopyToken(tok)
		if tok, ok := tok.(xml.StartElement); ok {
			return &tok, nil
		}
	}
	return nil, errors.New("Failed to find StartElement")
}

// Next calls xml.Decoder.Token() to get the next xml.Token.
func (reader *XMLReader) Next() (xml.Token, error) {
	return reader.Token()
}

// ReadString reads the xml.CharData as a string after xml.StartElement.
// Only character data at the element's own depth is accumulated;
// nested elements' text is skipped.
func (reader *XMLReader) ReadString(start *xml.StartElement) (string, error) {
	if start == nil {
		st, err := reader.FindStartElement()
		if err != nil {
			return "", err
		}
		start = st
	}
	var buf []byte
	depth := 1
	for depth > 0 {
		t, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return "", err
		}
		switch t := t.(type) {
		case xml.CharData:
			if depth == 1 {
				buf = append(buf, t...)
			}
		case xml.StartElement:
			depth++
		case xml.EndElement:
			depth--
		}
	}
	return string(buf), nil
}

// ReadStrings reads the xml.CharData of all subelements with a slice
// of string returned, one entry per direct child element.
func (reader *XMLReader) ReadStrings(start *xml.StartElement) ([]string, error) {
	var strings []string
	if start == nil {
		st, err := reader.FindStartElement()
		if err != nil {
			return nil, err
		}
		start = st
	}
	depth := 1
	for depth > 0 {
		t, err := reader.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		switch t := t.(type) {
		case xml.StartElement:
			str, err := reader.ReadString(&t)
			if err != nil {
				return nil, err
			}
			strings = append(strings, str)
		case xml.EndElement:
			depth--
		}
	}
	return strings, nil
}

// ReadBool reads the xml.CharData as bool.
func (reader *XMLReader) ReadBool(start *xml.StartElement) (bool, error) {
	str, err := reader.ReadString(start)
	if err != nil {
		return false, err
	}
	return strconv.ParseBool(str)
}

// ReadBools reads all subelements' xml.CharData, parsing each as bool.
func (reader *XMLReader) ReadBools(start *xml.StartElement) ([]bool, error) {
	strs, err := reader.ReadStrings(start)
	if err != nil {
		return nil, err
	}
	var bools []bool
	for _, sv := range strs {
		bv, err := strconv.ParseBool(sv)
		if err != nil {
			return nil, err
		}
		bools = append(bools, bv)
	}
	return bools, nil
}

// ReadInt64 reads the xml.CharData as int64 (base-10).
func (reader *XMLReader) ReadInt64(start *xml.StartElement) (int64, error) {
	str, err := reader.ReadString(start)
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(str, 10, 64)
}

// ReadInt64s reads all subelements' xml.CharData, parsing each as int64.
func (reader *XMLReader) ReadInt64s(start *xml.StartElement) ([]int64, error) {
	strs, err := reader.ReadStrings(start)
	if err != nil {
		return nil, err
	}
	var int64s []int64
	for _, sv := range strs {
		iv, err := strconv.ParseInt(sv, 10, 64)
		if err != nil {
			return nil, err
		}
		int64s = append(int64s, iv)
	}
	return int64s, nil
}

// ReadFloat64 reads the xml.CharData as float64.
func (reader *XMLReader) ReadFloat64(start *xml.StartElement) (float64, error) {
	str, err := reader.ReadString(start)
	if err != nil {
		return 0.0, err
	}
	return strconv.ParseFloat(str, 64)
}

// ReadFloat64s reads all subelements' xml.CharData, parsing each as float64.
func (reader *XMLReader) ReadFloat64s(start *xml.StartElement) ([]float64, error) {
	strs, err := reader.ReadStrings(start)
	if err != nil {
		return nil, err
	}
	var float64s []float64
	for _, sv := range strs {
		fv, err := strconv.ParseFloat(sv, 64)
		if err != nil {
			return nil, err
		}
		float64s = append(float64s, fv)
	}
	return float64s, nil
}

// ReadTime reads the xml.CharData as time.Time in RFC 3339 (nanosecond) form.
func (reader *XMLReader) ReadTime(start *xml.StartElement) (time.Time, error) {
	str, err := reader.ReadString(start)
	if err != nil {
		var t time.Time
		return t, err
	}
	return time.Parse(time.RFC3339Nano, str)
}

// ReadTimes reads all subelements' xml.CharData, parsing each as time.Time.
func (reader *XMLReader) ReadTimes(start *xml.StartElement) ([]time.Time, error) {
	strs, err := reader.ReadStrings(start)
	if err != nil {
		return nil, err
	}
	var times []time.Time
	for _, sv := range strs {
		tv, err := time.Parse(time.RFC3339Nano, sv)
		if err != nil {
			return nil, err
		}
		times = append(times, tv)
	}
	return times, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/writers.go
vendor/github.com/ovirt/go-ovirt/writers.go
// Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovirtsdk import ( "fmt" ) func XMLAffinityGroupWriteOne(writer *XMLWriter, object *AffinityGroup, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "affinity_group" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Cluster(); ok { XMLClusterWriteOne(writer, r, "cluster") } if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Enforcing(); ok { writer.WriteBool("enforcing", r) } if r, ok := object.HostLabels(); ok { XMLAffinityLabelWriteMany(writer, r, "host_labels", "affinity_label") } if r, ok := object.Hosts(); ok { XMLHostWriteMany(writer, r, "hosts", "host") } if r, ok := object.HostsRule(); ok { XMLAffinityRuleWriteOne(writer, r, "hosts_rule") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Positive(); ok { writer.WriteBool("positive", r) } if r, ok := object.Priority(); ok { writer.WriteFloat64("priority", r) } if r, ok := object.VmLabels(); ok { XMLAffinityLabelWriteMany(writer, r, "vm_labels", "affinity_label") } if r, ok := object.Vms(); ok { XMLVmWriteMany(writer, r, "vms", "vm") } if r, ok := 
object.VmsRule(); ok { XMLAffinityRuleWriteOne(writer, r, "vms_rule") } writer.WriteEnd(tag) return nil } func XMLAffinityGroupWriteMany(writer *XMLWriter, structSlice *AffinityGroupSlice, plural, singular string) error { if plural == "" { plural = "affinity_groups" } if singular == "" { singular = "affinity_group" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAffinityGroupWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLAffinityLabelWriteOne(writer *XMLWriter, object *AffinityLabel, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "affinity_label" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.HasImplicitAffinityGroup(); ok { writer.WriteBool("has_implicit_affinity_group", r) } if r, ok := object.Hosts(); ok { XMLHostWriteMany(writer, r, "hosts", "host") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.ReadOnly(); ok { writer.WriteBool("read_only", r) } if r, ok := object.Vms(); ok { XMLVmWriteMany(writer, r, "vms", "vm") } writer.WriteEnd(tag) return nil } func XMLAffinityLabelWriteMany(writer *XMLWriter, structSlice *AffinityLabelSlice, plural, singular string) error { if plural == "" { plural = "affinity_labels" } if singular == "" { singular = "affinity_label" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAffinityLabelWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLAffinityRuleWriteOne(writer *XMLWriter, object *AffinityRule, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "affinity_rule" } 
writer.WriteStart("", tag, nil) if r, ok := object.Enabled(); ok { writer.WriteBool("enabled", r) } if r, ok := object.Enforcing(); ok { writer.WriteBool("enforcing", r) } if r, ok := object.Positive(); ok { writer.WriteBool("positive", r) } writer.WriteEnd(tag) return nil } func XMLAffinityRuleWriteMany(writer *XMLWriter, structSlice *AffinityRuleSlice, plural, singular string) error { if plural == "" { plural = "affinity_rules" } if singular == "" { singular = "affinity_rule" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAffinityRuleWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLAgentWriteOne(writer *XMLWriter, object *Agent, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "agent" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Address(); ok { writer.WriteCharacter("address", r) } if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Concurrent(); ok { writer.WriteBool("concurrent", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.EncryptOptions(); ok { writer.WriteBool("encrypt_options", r) } if r, ok := object.Host(); ok { XMLHostWriteOne(writer, r, "host") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Options(); ok { XMLOptionWriteMany(writer, r, "options", "option") } if r, ok := object.Order(); ok { writer.WriteInt64("order", r) } if r, ok := object.Password(); ok { writer.WriteCharacter("password", r) } if r, ok := object.Port(); ok { writer.WriteInt64("port", r) } if r, ok := object.Type(); ok { writer.WriteCharacter("type", r) } if r, ok := object.Username(); ok { writer.WriteCharacter("username", r) } writer.WriteEnd(tag) return nil } func XMLAgentWriteMany(writer 
*XMLWriter, structSlice *AgentSlice, plural, singular string) error { if plural == "" { plural = "agents" } if singular == "" { singular = "agent" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAgentWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLAgentConfigurationWriteOne(writer *XMLWriter, object *AgentConfiguration, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "agent_configuration" } writer.WriteStart("", tag, nil) if r, ok := object.Address(); ok { writer.WriteCharacter("address", r) } if r, ok := object.BrokerType(); ok { XMLMessageBrokerTypeWriteOne(writer, r, "broker_type") } if r, ok := object.NetworkMappings(); ok { writer.WriteCharacter("network_mappings", r) } if r, ok := object.Password(); ok { writer.WriteCharacter("password", r) } if r, ok := object.Port(); ok { writer.WriteInt64("port", r) } if r, ok := object.Username(); ok { writer.WriteCharacter("username", r) } writer.WriteEnd(tag) return nil } func XMLAgentConfigurationWriteMany(writer *XMLWriter, structSlice *AgentConfigurationSlice, plural, singular string) error { if plural == "" { plural = "agent_configurations" } if singular == "" { singular = "agent_configuration" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAgentConfigurationWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLApiWriteOne(writer *XMLWriter, object *Api, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "api" } writer.WriteStart("", tag, nil) if r, ok := object.AuthenticatedUser(); ok { XMLUserWriteOne(writer, r, "authenticated_user") } if r, ok := object.EffectiveUser(); ok { XMLUserWriteOne(writer, r, "effective_user") } if r, ok := object.ProductInfo(); ok { XMLProductInfoWriteOne(writer, r, "product_info") } if r, ok := object.SpecialObjects(); ok { 
XMLSpecialObjectsWriteOne(writer, r, "special_objects") } if r, ok := object.Summary(); ok { XMLApiSummaryWriteOne(writer, r, "summary") } if r, ok := object.Time(); ok { writer.WriteDate("time", r) } writer.WriteEnd(tag) return nil } func XMLApiWriteMany(writer *XMLWriter, structSlice *ApiSlice, plural, singular string) error { if plural == "" { plural = "apis" } if singular == "" { singular = "api" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLApiWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLApiSummaryWriteOne(writer *XMLWriter, object *ApiSummary, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "api_summary" } writer.WriteStart("", tag, nil) if r, ok := object.Hosts(); ok { XMLApiSummaryItemWriteOne(writer, r, "hosts") } if r, ok := object.StorageDomains(); ok { XMLApiSummaryItemWriteOne(writer, r, "storage_domains") } if r, ok := object.Users(); ok { XMLApiSummaryItemWriteOne(writer, r, "users") } if r, ok := object.Vms(); ok { XMLApiSummaryItemWriteOne(writer, r, "vms") } writer.WriteEnd(tag) return nil } func XMLApiSummaryWriteMany(writer *XMLWriter, structSlice *ApiSummarySlice, plural, singular string) error { if plural == "" { plural = "api_summaries" } if singular == "" { singular = "api_summary" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLApiSummaryWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLApiSummaryItemWriteOne(writer *XMLWriter, object *ApiSummaryItem, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "api_summary_item" } writer.WriteStart("", tag, nil) if r, ok := object.Active(); ok { writer.WriteInt64("active", r) } if r, ok := object.Total(); ok { writer.WriteInt64("total", r) } writer.WriteEnd(tag) return nil } func XMLApiSummaryItemWriteMany(writer *XMLWriter, structSlice 
*ApiSummaryItemSlice, plural, singular string) error { if plural == "" { plural = "api_summary_items" } if singular == "" { singular = "api_summary_item" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLApiSummaryItemWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLApplicationWriteOne(writer *XMLWriter, object *Application, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "application" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Vm(); ok { XMLVmWriteOne(writer, r, "vm") } writer.WriteEnd(tag) return nil } func XMLApplicationWriteMany(writer *XMLWriter, structSlice *ApplicationSlice, plural, singular string) error { if plural == "" { plural = "applications" } if singular == "" { singular = "application" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLApplicationWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLAuthorizedKeyWriteOne(writer *XMLWriter, object *AuthorizedKey, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "authorized_key" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Key(); ok { writer.WriteCharacter("key", r) } if r, ok := object.Name(); ok { writer.WriteCharacter("name", 
r) } if r, ok := object.User(); ok { XMLUserWriteOne(writer, r, "user") } writer.WriteEnd(tag) return nil } func XMLAuthorizedKeyWriteMany(writer *XMLWriter, structSlice *AuthorizedKeySlice, plural, singular string) error { if plural == "" { plural = "authorized_keys" } if singular == "" { singular = "authorized_key" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLAuthorizedKeyWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBackupWriteOne(writer *XMLWriter, object *Backup, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "backup" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.CreationDate(); ok { writer.WriteDate("creation_date", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Disks(); ok { XMLDiskWriteMany(writer, r, "disks", "disk") } if r, ok := object.FromCheckpointId(); ok { writer.WriteCharacter("from_checkpoint_id", r) } if r, ok := object.Host(); ok { XMLHostWriteOne(writer, r, "host") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Phase(); ok { XMLBackupPhaseWriteOne(writer, r, "phase") } if r, ok := object.ToCheckpointId(); ok { writer.WriteCharacter("to_checkpoint_id", r) } if r, ok := object.Vm(); ok { XMLVmWriteOne(writer, r, "vm") } writer.WriteEnd(tag) return nil } func XMLBackupWriteMany(writer *XMLWriter, structSlice *BackupSlice, plural, singular string) error { if plural == "" { plural = "backups" } if singular == "" { singular = "backup" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBackupWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBalanceWriteOne(writer 
*XMLWriter, object *Balance, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "balance" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.SchedulingPolicy(); ok { XMLSchedulingPolicyWriteOne(writer, r, "scheduling_policy") } if r, ok := object.SchedulingPolicyUnit(); ok { XMLSchedulingPolicyUnitWriteOne(writer, r, "scheduling_policy_unit") } writer.WriteEnd(tag) return nil } func XMLBalanceWriteMany(writer *XMLWriter, structSlice *BalanceSlice, plural, singular string) error { if plural == "" { plural = "balances" } if singular == "" { singular = "balance" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBalanceWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBiosWriteOne(writer *XMLWriter, object *Bios, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "bios" } writer.WriteStart("", tag, nil) if r, ok := object.BootMenu(); ok { XMLBootMenuWriteOne(writer, r, "boot_menu") } if r, ok := object.Type(); ok { XMLBiosTypeWriteOne(writer, r, "type") } writer.WriteEnd(tag) return nil } func XMLBiosWriteMany(writer *XMLWriter, structSlice *BiosSlice, plural, singular string) error { if plural == "" { plural = "bioss" } if singular == "" { singular = "bios" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBiosWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBlockStatisticWriteOne(writer *XMLWriter, object *BlockStatistic, tag string) error { if object == nil { return fmt.Errorf("input 
object pointer is nil") } if tag == "" { tag = "block_statistic" } writer.WriteStart("", tag, nil) if r, ok := object.Statistics(); ok { XMLStatisticWriteMany(writer, r, "statistics", "statistic") } writer.WriteEnd(tag) return nil } func XMLBlockStatisticWriteMany(writer *XMLWriter, structSlice *BlockStatisticSlice, plural, singular string) error { if plural == "" { plural = "block_statistics" } if singular == "" { singular = "block_statistic" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBlockStatisticWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBondingWriteOne(writer *XMLWriter, object *Bonding, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "bonding" } writer.WriteStart("", tag, nil) if r, ok := object.ActiveSlave(); ok { XMLHostNicWriteOne(writer, r, "active_slave") } if r, ok := object.AdPartnerMac(); ok { XMLMacWriteOne(writer, r, "ad_partner_mac") } if r, ok := object.Options(); ok { XMLOptionWriteMany(writer, r, "options", "option") } if r, ok := object.Slaves(); ok { XMLHostNicWriteMany(writer, r, "slaves", "host_nic") } writer.WriteEnd(tag) return nil } func XMLBondingWriteMany(writer *XMLWriter, structSlice *BondingSlice, plural, singular string) error { if plural == "" { plural = "bondings" } if singular == "" { singular = "bonding" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBondingWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBookmarkWriteOne(writer *XMLWriter, object *Bookmark, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "bookmark" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := 
object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Value(); ok { writer.WriteCharacter("value", r) } writer.WriteEnd(tag) return nil } func XMLBookmarkWriteMany(writer *XMLWriter, structSlice *BookmarkSlice, plural, singular string) error { if plural == "" { plural = "bookmarks" } if singular == "" { singular = "bookmark" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBookmarkWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBootWriteOne(writer *XMLWriter, object *Boot, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "boot" } writer.WriteStart("", tag, nil) if r, ok := object.Devices(); ok { XMLBootDeviceWriteMany(writer, r, "devices", "device") } writer.WriteEnd(tag) return nil } func XMLBootWriteMany(writer *XMLWriter, structSlice *BootSlice, plural, singular string) error { if plural == "" { plural = "boots" } if singular == "" { singular = "boot" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBootWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBootMenuWriteOne(writer *XMLWriter, object *BootMenu, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "boot_menu" } writer.WriteStart("", tag, nil) if r, ok := object.Enabled(); ok { writer.WriteBool("enabled", r) } writer.WriteEnd(tag) return nil } func XMLBootMenuWriteMany(writer *XMLWriter, structSlice *BootMenuSlice, plural, singular string) error { if plural == "" { plural = "boot_menus" } if singular == "" { singular = "boot_menu" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBootMenuWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLBrickProfileDetailWriteOne(writer *XMLWriter, object *BrickProfileDetail, tag 
string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "brick_profile_detail" } writer.WriteStart("", tag, nil) if r, ok := object.Brick(); ok { XMLGlusterBrickWriteOne(writer, r, "brick") } if r, ok := object.ProfileDetails(); ok { XMLProfileDetailWriteMany(writer, r, "profile_details", "profile_detail") } writer.WriteEnd(tag) return nil } func XMLBrickProfileDetailWriteMany(writer *XMLWriter, structSlice *BrickProfileDetailSlice, plural, singular string) error { if plural == "" { plural = "brick_profile_details" } if singular == "" { singular = "brick_profile_detail" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLBrickProfileDetailWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLCdromWriteOne(writer *XMLWriter, object *Cdrom, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "cdrom" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.File(); ok { XMLFileWriteOne(writer, r, "file") } if r, ok := object.InstanceType(); ok { XMLInstanceTypeWriteOne(writer, r, "instance_type") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Template(); ok { XMLTemplateWriteOne(writer, r, "template") } if r, ok := object.Vm(); ok { XMLVmWriteOne(writer, r, "vm") } if r, ok := object.Vms(); ok { XMLVmWriteMany(writer, r, "vms", "vm") } writer.WriteEnd(tag) return nil } func XMLCdromWriteMany(writer *XMLWriter, structSlice *CdromSlice, plural, singular string) error { if plural == "" { plural = "cdroms" } if singular == "" { singular = "cdrom" } writer.WriteStart("", plural, nil) for _, o := range 
structSlice.Slice() { XMLCdromWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLCertificateWriteOne(writer *XMLWriter, object *Certificate, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "certificate" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Content(); ok { writer.WriteCharacter("content", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.Organization(); ok { writer.WriteCharacter("organization", r) } if r, ok := object.Subject(); ok { writer.WriteCharacter("subject", r) } writer.WriteEnd(tag) return nil } func XMLCertificateWriteMany(writer *XMLWriter, structSlice *CertificateSlice, plural, singular string) error { if plural == "" { plural = "certificates" } if singular == "" { singular = "certificate" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLCertificateWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLCheckpointWriteOne(writer *XMLWriter, object *Checkpoint, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "checkpoint" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.CreationDate(); ok { writer.WriteDate("creation_date", r) } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Disks(); ok { XMLDiskWriteMany(writer, r, "disks", "disk") } if r, ok := object.Name(); ok { 
writer.WriteCharacter("name", r) } if r, ok := object.ParentId(); ok { writer.WriteCharacter("parent_id", r) } if r, ok := object.State(); ok { XMLCheckpointStateWriteOne(writer, r, "state") } if r, ok := object.Vm(); ok { XMLVmWriteOne(writer, r, "vm") } writer.WriteEnd(tag) return nil } func XMLCheckpointWriteMany(writer *XMLWriter, structSlice *CheckpointSlice, plural, singular string) error { if plural == "" { plural = "checkpoints" } if singular == "" { singular = "checkpoint" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLCheckpointWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLCloudInitWriteOne(writer *XMLWriter, object *CloudInit, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "cloud_init" } writer.WriteStart("", tag, nil) if r, ok := object.AuthorizedKeys(); ok { XMLAuthorizedKeyWriteMany(writer, r, "authorized_keys", "authorized_key") } if r, ok := object.Files(); ok { XMLFileWriteMany(writer, r, "files", "file") } if r, ok := object.Host(); ok { XMLHostWriteOne(writer, r, "host") } if r, ok := object.NetworkConfiguration(); ok { XMLNetworkConfigurationWriteOne(writer, r, "network_configuration") } if r, ok := object.RegenerateSshKeys(); ok { writer.WriteBool("regenerate_ssh_keys", r) } if r, ok := object.Timezone(); ok { writer.WriteCharacter("timezone", r) } if r, ok := object.Users(); ok { XMLUserWriteMany(writer, r, "users", "user") } writer.WriteEnd(tag) return nil } func XMLCloudInitWriteMany(writer *XMLWriter, structSlice *CloudInitSlice, plural, singular string) error { if plural == "" { plural = "cloud_inits" } if singular == "" { singular = "cloud_init" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLCloudInitWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLClusterWriteOne(writer *XMLWriter, object *Cluster, tag string) error { if object == nil { return 
fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "cluster" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.AffinityGroups(); ok { XMLAffinityGroupWriteMany(writer, r, "affinity_groups", "affinity_group") } if r, ok := object.BallooningEnabled(); ok { writer.WriteBool("ballooning_enabled", r) } if r, ok := object.BiosType(); ok { XMLBiosTypeWriteOne(writer, r, "bios_type") } if r, ok := object.Comment(); ok { writer.WriteCharacter("comment", r) } if r, ok := object.Cpu(); ok { XMLCpuWriteOne(writer, r, "cpu") } if r, ok := object.CpuProfiles(); ok { XMLCpuProfileWriteMany(writer, r, "cpu_profiles", "cpu_profile") } if r, ok := object.CustomSchedulingPolicyProperties(); ok { XMLPropertyWriteMany(writer, r, "custom_scheduling_policy_properties", "property") } if r, ok := object.DataCenter(); ok { XMLDataCenterWriteOne(writer, r, "data_center") } if r, ok := object.Description(); ok { writer.WriteCharacter("description", r) } if r, ok := object.Display(); ok { XMLDisplayWriteOne(writer, r, "display") } if r, ok := object.EnabledFeatures(); ok { XMLClusterFeatureWriteMany(writer, r, "enabled_features", "cluster_feature") } if r, ok := object.ErrorHandling(); ok { XMLErrorHandlingWriteOne(writer, r, "error_handling") } if r, ok := object.ExternalNetworkProviders(); ok { XMLExternalProviderWriteMany(writer, r, "external_network_providers", "external_provider") } if r, ok := object.FencingPolicy(); ok { XMLFencingPolicyWriteOne(writer, r, "fencing_policy") } if r, ok := object.FipsMode(); ok { XMLFipsModeWriteOne(writer, r, "fips_mode") } if r, ok := object.FirewallType(); ok { XMLFirewallTypeWriteOne(writer, r, "firewall_type") } if r, ok := object.GlusterHooks(); ok { XMLGlusterHookWriteMany(writer, r, "gluster_hooks", "gluster_hook") } if r, ok := object.GlusterService(); ok { writer.WriteBool("gluster_service", r) } if 
r, ok := object.GlusterTunedProfile(); ok { writer.WriteCharacter("gluster_tuned_profile", r) } if r, ok := object.GlusterVolumes(); ok { XMLGlusterVolumeWriteMany(writer, r, "gluster_volumes", "gluster_volume") } if r, ok := object.HaReservation(); ok { writer.WriteBool("ha_reservation", r) } if r, ok := object.Ksm(); ok { XMLKsmWriteOne(writer, r, "ksm") } if r, ok := object.LogMaxMemoryUsedThreshold(); ok { writer.WriteInt64("log_max_memory_used_threshold", r) } if r, ok := object.LogMaxMemoryUsedThresholdType(); ok { XMLLogMaxMemoryUsedThresholdTypeWriteOne(writer, r, "log_max_memory_used_threshold_type") } if r, ok := object.MacPool(); ok { XMLMacPoolWriteOne(writer, r, "mac_pool") } if r, ok := object.MaintenanceReasonRequired(); ok { writer.WriteBool("maintenance_reason_required", r) } if r, ok := object.ManagementNetwork(); ok { XMLNetworkWriteOne(writer, r, "management_network") } if r, ok := object.MemoryPolicy(); ok { XMLMemoryPolicyWriteOne(writer, r, "memory_policy") } if r, ok := object.Migration(); ok { XMLMigrationOptionsWriteOne(writer, r, "migration") } if r, ok := object.Name(); ok { writer.WriteCharacter("name", r) } if r, ok := object.NetworkFilters(); ok { XMLNetworkFilterWriteMany(writer, r, "network_filters", "network_filter") } if r, ok := object.Networks(); ok { XMLNetworkWriteMany(writer, r, "networks", "network") } if r, ok := object.OptionalReason(); ok { writer.WriteBool("optional_reason", r) } if r, ok := object.Permissions(); ok { XMLPermissionWriteMany(writer, r, "permissions", "permission") } if r, ok := object.RequiredRngSources(); ok { XMLRngSourceWriteMany(writer, r, "required_rng_sources", "required_rng_source") } if r, ok := object.SchedulingPolicy(); ok { XMLSchedulingPolicyWriteOne(writer, r, "scheduling_policy") } if r, ok := object.SerialNumber(); ok { XMLSerialNumberWriteOne(writer, r, "serial_number") } if r, ok := object.SupportedVersions(); ok { XMLVersionWriteMany(writer, r, "supported_versions", "version") } if r, ok 
:= object.SwitchType(); ok { XMLSwitchTypeWriteOne(writer, r, "switch_type") } if r, ok := object.ThreadsAsCores(); ok { writer.WriteBool("threads_as_cores", r) } if r, ok := object.TrustedService(); ok { writer.WriteBool("trusted_service", r) } if r, ok := object.TunnelMigration(); ok { writer.WriteBool("tunnel_migration", r) } if r, ok := object.Version(); ok { XMLVersionWriteOne(writer, r, "version") } if r, ok := object.VirtService(); ok { writer.WriteBool("virt_service", r) } if r, ok := object.VncEncryption(); ok { writer.WriteBool("vnc_encryption", r) } writer.WriteEnd(tag) return nil } func XMLClusterWriteMany(writer *XMLWriter, structSlice *ClusterSlice, plural, singular string) error { if plural == "" { plural = "clusters" } if singular == "" { singular = "cluster" } writer.WriteStart("", plural, nil) for _, o := range structSlice.Slice() { XMLClusterWriteOne(writer, o, singular) } writer.WriteEnd(plural) return nil } func XMLClusterFeatureWriteOne(writer *XMLWriter, object *ClusterFeature, tag string) error { if object == nil { return fmt.Errorf("input object pointer is nil") } if tag == "" { tag = "cluster_feature" } var attrs map[string]string if r, ok := object.Id(); ok { if attrs == nil { attrs = make(map[string]string) } attrs["id"] = r } writer.WriteStart("", tag, attrs) if r, ok := object.ClusterLevel(); ok {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/type.go
vendor/github.com/ovirt/go-ovirt/type.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ovirtsdk type Href interface { Href() (string, bool) } // Link represents struct of href and rel attributes type Link struct { href *string rel *string } // Struct represents the base for all struts defined in types.go type Struct struct { href *string } func (p *Struct) Href() (string, bool) { if p.href != nil { return *p.href, true } return "", false } func (p *Struct) MustHref() string { if p.href == nil { panic("href attribute must exist") } return *p.href } func (p *Struct) SetHref(attr string) { p.href = &attr }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/http.go
vendor/github.com/ovirt/go-ovirt/http.go
// // Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ovirtsdk
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/services.go
vendor/github.com/ovirt/go-ovirt/services.go
// Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovirtsdk import ( "bytes" "fmt" "io/ioutil" "net/http" "net/http/httputil" "net/url" "strings" ) // This annotation is intended to specify what oVirt area is the annotated concept related to. Currently the following // areas are in use, and they are closely related to the oVirt teams, but not necessarily the same: // - Infrastructure // - Network // - SLA // - Storage // - Virtualization // A concept may be associated to more than one area, or to no area. // The value of this annotation is intended for reporting only, and it doesn't affect at all the generated code or the // validity of the model type AreaService struct { BaseService } func NewAreaService(connection *Connection, path string) *AreaService { var result AreaService result.connection = connection result.path = path return &result } // Service locator method, returns individual service on which the URI is dispatched. 
func (op *AreaService) Service(path string) (Service, error) { if path == "" { return op, nil } return nil, fmt.Errorf("The path <%s> doesn't correspond to any service", path) } func (op *AreaService) String() string { return fmt.Sprintf("AreaService:%s", op.path) } type FollowService struct { BaseService } func NewFollowService(connection *Connection, path string) *FollowService { var result FollowService result.connection = connection result.path = path return &result } // Service locator method, returns individual service on which the URI is dispatched. func (op *FollowService) Service(path string) (Service, error) { if path == "" { return op, nil } return nil, fmt.Errorf("The path <%s> doesn't correspond to any service", path) } func (op *FollowService) String() string { return fmt.Sprintf("FollowService:%s", op.path) } // This service manages a single host label assigned to an affinity group. type AffinityGroupHostLabelService struct { BaseService } func NewAffinityGroupHostLabelService(connection *Connection, path string) *AffinityGroupHostLabelService { var result AffinityGroupHostLabelService result.connection = connection result.path = path return &result } // Remove this label from the affinity group. 
type AffinityGroupHostLabelServiceRemoveRequest struct { AffinityGroupHostLabelService *AffinityGroupHostLabelService header map[string]string query map[string]string async *bool } func (p *AffinityGroupHostLabelServiceRemoveRequest) Header(key, value string) *AffinityGroupHostLabelServiceRemoveRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupHostLabelServiceRemoveRequest) Query(key, value string) *AffinityGroupHostLabelServiceRemoveRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupHostLabelServiceRemoveRequest) Async(async bool) *AffinityGroupHostLabelServiceRemoveRequest { p.async = &async return p } func (p *AffinityGroupHostLabelServiceRemoveRequest) Send() (*AffinityGroupHostLabelServiceRemoveResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostLabelService.connection.URL(), p.AffinityGroupHostLabelService.path) values := make(url.Values) if p.async != nil { values["async"] = []string{fmt.Sprintf("%v", *p.async)} } if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) } req, err := http.NewRequest("DELETE", rawURL, nil) if err != nil { return nil, err } for hk, hv := range p.AffinityGroupHostLabelService.connection.headers { req.Header.Add(hk, hv) } if p.header != nil { for hk, hv := range p.header { req.Header.Add(hk, hv) } } req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Version", "4") req.Header.Add("Content-Type", "application/xml") req.Header.Add("Accept", "application/xml") // get OAuth access token token, err := p.AffinityGroupHostLabelService.connection.authenticate() if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) // Send the request and wait for the response resp, err := 
p.AffinityGroupHostLabelService.connection.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if p.AffinityGroupHostLabelService.connection.logFunc != nil { dumpReq, err := httputil.DumpRequestOut(req, true) if err != nil { return nil, err } dumpResp, err := httputil.DumpResponse(resp, true) if err != nil { return nil, err } p.AffinityGroupHostLabelService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp)) } respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body) if errReadBody != nil { return nil, errReadBody } if !Contains(resp.StatusCode, []int{200}) { return nil, CheckFault(respBodyBytes, resp) } return new(AffinityGroupHostLabelServiceRemoveResponse), nil } func (p *AffinityGroupHostLabelServiceRemoveRequest) MustSend() *AffinityGroupHostLabelServiceRemoveResponse { if v, err := p.Send(); err != nil { panic(err) } else { return v } } // Remove this label from the affinity group. type AffinityGroupHostLabelServiceRemoveResponse struct { } // Remove this label from the affinity group. func (p *AffinityGroupHostLabelService) Remove() *AffinityGroupHostLabelServiceRemoveRequest { return &AffinityGroupHostLabelServiceRemoveRequest{AffinityGroupHostLabelService: p} } // Service locator method, returns individual service on which the URI is dispatched. func (op *AffinityGroupHostLabelService) Service(path string) (Service, error) { if path == "" { return op, nil } return nil, fmt.Errorf("The path <%s> doesn't correspond to any service", path) } func (op *AffinityGroupHostLabelService) String() string { return fmt.Sprintf("AffinityGroupHostLabelService:%s", op.path) } // This service manages a collection of all host labels assigned to an affinity group. 
type AffinityGroupHostLabelsService struct { BaseService } func NewAffinityGroupHostLabelsService(connection *Connection, path string) *AffinityGroupHostLabelsService { var result AffinityGroupHostLabelsService result.connection = connection result.path = path return &result } // Adds a host label to the affinity group. // For example, to add the label `789` to the affinity group `456` of cluster `123`, // send a request like this: // .... // POST /ovirt-engine/api/clusters/123/affinitygroups/456/hostlabels // .... // With the following body: // [source,xml] // ---- // <affinity_label id="789"/> // ---- type AffinityGroupHostLabelsServiceAddRequest struct { AffinityGroupHostLabelsService *AffinityGroupHostLabelsService header map[string]string query map[string]string label *AffinityLabel } func (p *AffinityGroupHostLabelsServiceAddRequest) Header(key, value string) *AffinityGroupHostLabelsServiceAddRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupHostLabelsServiceAddRequest) Query(key, value string) *AffinityGroupHostLabelsServiceAddRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupHostLabelsServiceAddRequest) Label(label *AffinityLabel) *AffinityGroupHostLabelsServiceAddRequest { p.label = label return p } func (p *AffinityGroupHostLabelsServiceAddRequest) Send() (*AffinityGroupHostLabelsServiceAddResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostLabelsService.connection.URL(), p.AffinityGroupHostLabelsService.path) values := make(url.Values) if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) } var body bytes.Buffer writer := NewXMLWriter(&body) err := XMLAffinityLabelWriteOne(writer, p.label, "") if err != nil { return nil, err } writer.Flush() req, err := http.NewRequest("POST", rawURL, &body) if err != 
nil { return nil, err } for hk, hv := range p.AffinityGroupHostLabelsService.connection.headers { req.Header.Add(hk, hv) } if p.header != nil { for hk, hv := range p.header { req.Header.Add(hk, hv) } } req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Version", "4") req.Header.Add("Content-Type", "application/xml") req.Header.Add("Accept", "application/xml") // get OAuth access token token, err := p.AffinityGroupHostLabelsService.connection.authenticate() if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) // Send the request and wait for the response resp, err := p.AffinityGroupHostLabelsService.connection.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if p.AffinityGroupHostLabelsService.connection.logFunc != nil { dumpReq, err := httputil.DumpRequestOut(req, true) if err != nil { return nil, err } dumpResp, err := httputil.DumpResponse(resp, true) if err != nil { return nil, err } p.AffinityGroupHostLabelsService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp)) } respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body) if errReadBody != nil { return nil, errReadBody } if !Contains(resp.StatusCode, []int{200, 201, 202}) { return nil, CheckFault(respBodyBytes, resp) } reader := NewXMLReader(respBodyBytes) result, err := XMLAffinityLabelReadOne(reader, nil, "") if err != nil { return nil, err } return &AffinityGroupHostLabelsServiceAddResponse{label: result}, nil } func (p *AffinityGroupHostLabelsServiceAddRequest) MustSend() *AffinityGroupHostLabelsServiceAddResponse { if v, err := p.Send(); err != nil { panic(err) } else { return v } } // Adds a host label to the affinity group. // For example, to add the label `789` to the affinity group `456` of cluster `123`, // send a request like this: // .... // POST /ovirt-engine/api/clusters/123/affinitygroups/456/hostlabels // .... 
// With the following body: // [source,xml] // ---- // <affinity_label id="789"/> // ---- type AffinityGroupHostLabelsServiceAddResponse struct { label *AffinityLabel } func (p *AffinityGroupHostLabelsServiceAddResponse) Label() (*AffinityLabel, bool) { if p.label != nil { return p.label, true } return nil, false } func (p *AffinityGroupHostLabelsServiceAddResponse) MustLabel() *AffinityLabel { if p.label == nil { panic("label in response does not exist") } return p.label } // Adds a host label to the affinity group. // For example, to add the label `789` to the affinity group `456` of cluster `123`, // send a request like this: // .... // POST /ovirt-engine/api/clusters/123/affinitygroups/456/hostlabels // .... // With the following body: // [source,xml] // ---- // <affinity_label id="789"/> // ---- func (p *AffinityGroupHostLabelsService) Add() *AffinityGroupHostLabelsServiceAddRequest { return &AffinityGroupHostLabelsServiceAddRequest{AffinityGroupHostLabelsService: p} } // List all host labels assigned to this affinity group. // The order of the returned labels isn't guaranteed. 
type AffinityGroupHostLabelsServiceListRequest struct { AffinityGroupHostLabelsService *AffinityGroupHostLabelsService header map[string]string query map[string]string follow *string max *int64 } func (p *AffinityGroupHostLabelsServiceListRequest) Header(key, value string) *AffinityGroupHostLabelsServiceListRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupHostLabelsServiceListRequest) Query(key, value string) *AffinityGroupHostLabelsServiceListRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupHostLabelsServiceListRequest) Follow(follow string) *AffinityGroupHostLabelsServiceListRequest { p.follow = &follow return p } func (p *AffinityGroupHostLabelsServiceListRequest) Max(max int64) *AffinityGroupHostLabelsServiceListRequest { p.max = &max return p } func (p *AffinityGroupHostLabelsServiceListRequest) Send() (*AffinityGroupHostLabelsServiceListResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostLabelsService.connection.URL(), p.AffinityGroupHostLabelsService.path) values := make(url.Values) if p.follow != nil { values["follow"] = []string{fmt.Sprintf("%v", *p.follow)} } if p.max != nil { values["max"] = []string{fmt.Sprintf("%v", *p.max)} } if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) } req, err := http.NewRequest("GET", rawURL, nil) if err != nil { return nil, err } for hk, hv := range p.AffinityGroupHostLabelsService.connection.headers { req.Header.Add(hk, hv) } if p.header != nil { for hk, hv := range p.header { req.Header.Add(hk, hv) } } req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Version", "4") req.Header.Add("Content-Type", "application/xml") req.Header.Add("Accept", "application/xml") // get OAuth access token token, err := 
p.AffinityGroupHostLabelsService.connection.authenticate() if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) // Send the request and wait for the response resp, err := p.AffinityGroupHostLabelsService.connection.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if p.AffinityGroupHostLabelsService.connection.logFunc != nil { dumpReq, err := httputil.DumpRequestOut(req, true) if err != nil { return nil, err } dumpResp, err := httputil.DumpResponse(resp, true) if err != nil { return nil, err } p.AffinityGroupHostLabelsService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp)) } respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body) if errReadBody != nil { return nil, errReadBody } if !Contains(resp.StatusCode, []int{200}) { return nil, CheckFault(respBodyBytes, resp) } reader := NewXMLReader(respBodyBytes) result, err := XMLAffinityLabelReadMany(reader, nil) if err != nil { return nil, err } return &AffinityGroupHostLabelsServiceListResponse{labels: result}, nil } func (p *AffinityGroupHostLabelsServiceListRequest) MustSend() *AffinityGroupHostLabelsServiceListResponse { if v, err := p.Send(); err != nil { panic(err) } else { return v } } // List all host labels assigned to this affinity group. // The order of the returned labels isn't guaranteed. type AffinityGroupHostLabelsServiceListResponse struct { labels *AffinityLabelSlice } func (p *AffinityGroupHostLabelsServiceListResponse) Labels() (*AffinityLabelSlice, bool) { if p.labels != nil { return p.labels, true } return nil, false } func (p *AffinityGroupHostLabelsServiceListResponse) MustLabels() *AffinityLabelSlice { if p.labels == nil { panic("labels in response does not exist") } return p.labels } // List all host labels assigned to this affinity group. // The order of the returned labels isn't guaranteed. 
func (p *AffinityGroupHostLabelsService) List() *AffinityGroupHostLabelsServiceListRequest { return &AffinityGroupHostLabelsServiceListRequest{AffinityGroupHostLabelsService: p} } // Access the service that manages the host label assignment to this affinity group. func (op *AffinityGroupHostLabelsService) LabelService(id string) *AffinityGroupHostLabelService { return NewAffinityGroupHostLabelService(op.connection, fmt.Sprintf("%s/%s", op.path, id)) } // Service locator method, returns individual service on which the URI is dispatched. func (op *AffinityGroupHostLabelsService) Service(path string) (Service, error) { if path == "" { return op, nil } index := strings.Index(path, "/") if index == -1 { return op.LabelService(path), nil } return op.LabelService(path[:index]).Service(path[index+1:]) } func (op *AffinityGroupHostLabelsService) String() string { return fmt.Sprintf("AffinityGroupHostLabelsService:%s", op.path) } // This service manages a single host to affinity group assignment. type AffinityGroupHostService struct { BaseService } func NewAffinityGroupHostService(connection *Connection, path string) *AffinityGroupHostService { var result AffinityGroupHostService result.connection = connection result.path = path return &result } // Remove host from the affinity group. 
type AffinityGroupHostServiceRemoveRequest struct { AffinityGroupHostService *AffinityGroupHostService header map[string]string query map[string]string async *bool } func (p *AffinityGroupHostServiceRemoveRequest) Header(key, value string) *AffinityGroupHostServiceRemoveRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupHostServiceRemoveRequest) Query(key, value string) *AffinityGroupHostServiceRemoveRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupHostServiceRemoveRequest) Async(async bool) *AffinityGroupHostServiceRemoveRequest { p.async = &async return p } func (p *AffinityGroupHostServiceRemoveRequest) Send() (*AffinityGroupHostServiceRemoveResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostService.connection.URL(), p.AffinityGroupHostService.path) values := make(url.Values) if p.async != nil { values["async"] = []string{fmt.Sprintf("%v", *p.async)} } if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) } req, err := http.NewRequest("DELETE", rawURL, nil) if err != nil { return nil, err } for hk, hv := range p.AffinityGroupHostService.connection.headers { req.Header.Add(hk, hv) } if p.header != nil { for hk, hv := range p.header { req.Header.Add(hk, hv) } } req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Version", "4") req.Header.Add("Content-Type", "application/xml") req.Header.Add("Accept", "application/xml") // get OAuth access token token, err := p.AffinityGroupHostService.connection.authenticate() if err != nil { return nil, err } req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token)) // Send the request and wait for the response resp, err := p.AffinityGroupHostService.connection.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if 
p.AffinityGroupHostService.connection.logFunc != nil { dumpReq, err := httputil.DumpRequestOut(req, true) if err != nil { return nil, err } dumpResp, err := httputil.DumpResponse(resp, true) if err != nil { return nil, err } p.AffinityGroupHostService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp)) } respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body) if errReadBody != nil { return nil, errReadBody } if !Contains(resp.StatusCode, []int{200}) { return nil, CheckFault(respBodyBytes, resp) } return new(AffinityGroupHostServiceRemoveResponse), nil } func (p *AffinityGroupHostServiceRemoveRequest) MustSend() *AffinityGroupHostServiceRemoveResponse { if v, err := p.Send(); err != nil { panic(err) } else { return v } } // Remove host from the affinity group. type AffinityGroupHostServiceRemoveResponse struct { } // Remove host from the affinity group. func (p *AffinityGroupHostService) Remove() *AffinityGroupHostServiceRemoveRequest { return &AffinityGroupHostServiceRemoveRequest{AffinityGroupHostService: p} } // Service locator method, returns individual service on which the URI is dispatched. func (op *AffinityGroupHostService) Service(path string) (Service, error) { if path == "" { return op, nil } return nil, fmt.Errorf("The path <%s> doesn't correspond to any service", path) } func (op *AffinityGroupHostService) String() string { return fmt.Sprintf("AffinityGroupHostService:%s", op.path) } // This service manages a collection of all hosts assigned to an affinity group. type AffinityGroupHostsService struct { BaseService } func NewAffinityGroupHostsService(connection *Connection, path string) *AffinityGroupHostsService { var result AffinityGroupHostsService result.connection = connection result.path = path return &result } // Adds a host to the affinity group. // For example, to add the host `789` to the affinity group `456` of cluster `123`, send a request like // this: // .... 
// POST /ovirt-engine/api/clusters/123/affinitygroups/456/hosts // .... // With the following body: // [source,xml] // ---- // <host id="789"/> // ---- type AffinityGroupHostsServiceAddRequest struct { AffinityGroupHostsService *AffinityGroupHostsService header map[string]string query map[string]string host *Host } func (p *AffinityGroupHostsServiceAddRequest) Header(key, value string) *AffinityGroupHostsServiceAddRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupHostsServiceAddRequest) Query(key, value string) *AffinityGroupHostsServiceAddRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupHostsServiceAddRequest) Host(host *Host) *AffinityGroupHostsServiceAddRequest { p.host = host return p } func (p *AffinityGroupHostsServiceAddRequest) Send() (*AffinityGroupHostsServiceAddResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostsService.connection.URL(), p.AffinityGroupHostsService.path) values := make(url.Values) if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) } var body bytes.Buffer writer := NewXMLWriter(&body) err := XMLHostWriteOne(writer, p.host, "") if err != nil { return nil, err } writer.Flush() req, err := http.NewRequest("POST", rawURL, &body) if err != nil { return nil, err } for hk, hv := range p.AffinityGroupHostsService.connection.headers { req.Header.Add(hk, hv) } if p.header != nil { for hk, hv := range p.header { req.Header.Add(hk, hv) } } req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION)) req.Header.Add("Version", "4") req.Header.Add("Content-Type", "application/xml") req.Header.Add("Accept", "application/xml") // get OAuth access token token, err := p.AffinityGroupHostsService.connection.authenticate() if err != nil { return nil, err } req.Header.Add("Authorization", 
fmt.Sprintf("Bearer %s", token)) // Send the request and wait for the response resp, err := p.AffinityGroupHostsService.connection.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() if p.AffinityGroupHostsService.connection.logFunc != nil { dumpReq, err := httputil.DumpRequestOut(req, true) if err != nil { return nil, err } dumpResp, err := httputil.DumpResponse(resp, true) if err != nil { return nil, err } p.AffinityGroupHostsService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp)) } respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body) if errReadBody != nil { return nil, errReadBody } if !Contains(resp.StatusCode, []int{200, 201, 202}) { return nil, CheckFault(respBodyBytes, resp) } reader := NewXMLReader(respBodyBytes) result, err := XMLHostReadOne(reader, nil, "") if err != nil { return nil, err } return &AffinityGroupHostsServiceAddResponse{host: result}, nil } func (p *AffinityGroupHostsServiceAddRequest) MustSend() *AffinityGroupHostsServiceAddResponse { if v, err := p.Send(); err != nil { panic(err) } else { return v } } // Adds a host to the affinity group. // For example, to add the host `789` to the affinity group `456` of cluster `123`, send a request like // this: // .... // POST /ovirt-engine/api/clusters/123/affinitygroups/456/hosts // .... // With the following body: // [source,xml] // ---- // <host id="789"/> // ---- type AffinityGroupHostsServiceAddResponse struct { host *Host } func (p *AffinityGroupHostsServiceAddResponse) Host() (*Host, bool) { if p.host != nil { return p.host, true } return nil, false } func (p *AffinityGroupHostsServiceAddResponse) MustHost() *Host { if p.host == nil { panic("host in response does not exist") } return p.host } // Adds a host to the affinity group. // For example, to add the host `789` to the affinity group `456` of cluster `123`, send a request like // this: // .... // POST /ovirt-engine/api/clusters/123/affinitygroups/456/hosts // .... 
// With the following body:
// [source,xml]
// ----
// <host id="789"/>
// ----
func (p *AffinityGroupHostsService) Add() *AffinityGroupHostsServiceAddRequest {
	return &AffinityGroupHostsServiceAddRequest{AffinityGroupHostsService: p}
}

// List all hosts assigned to this affinity group.
// The order of the returned hosts isn't guaranteed.
type AffinityGroupHostsServiceListRequest struct {
	AffinityGroupHostsService *AffinityGroupHostsService
	header                    map[string]string // extra HTTP headers, allocated lazily
	query                     map[string]string // extra URL query parameters, allocated lazily
	follow                    *string           // "follow" links to expand in the result
	max                       *int64            // maximum number of hosts to return
}

// Header adds an extra HTTP header to send with this request and returns the
// request for chaining.
func (p *AffinityGroupHostsServiceListRequest) Header(key, value string) *AffinityGroupHostsServiceListRequest {
	if p.header == nil {
		p.header = make(map[string]string)
	}
	p.header[key] = value
	return p
}

// Query adds an extra URL query parameter to send with this request and
// returns the request for chaining.
func (p *AffinityGroupHostsServiceListRequest) Query(key, value string) *AffinityGroupHostsServiceListRequest {
	if p.query == nil {
		p.query = make(map[string]string)
	}
	p.query[key] = value
	return p
}

// Follow sets the "follow" query parameter used to expand linked entities.
func (p *AffinityGroupHostsServiceListRequest) Follow(follow string) *AffinityGroupHostsServiceListRequest {
	p.follow = &follow
	return p
}

// Max limits the number of hosts returned by the engine.
func (p *AffinityGroupHostsServiceListRequest) Max(max int64) *AffinityGroupHostsServiceListRequest {
	p.max = &max
	return p
}

// Send performs the GET request and decodes the returned list of hosts.
func (p *AffinityGroupHostsServiceListRequest) Send() (*AffinityGroupHostsServiceListResponse, error) {
	rawURL := fmt.Sprintf("%s%s", p.AffinityGroupHostsService.connection.URL(), p.AffinityGroupHostsService.path)
	values := make(url.Values)
	// Well-known parameters first, then user-supplied ones (which may
	// overwrite them).
	if p.follow != nil {
		values["follow"] = []string{fmt.Sprintf("%v", *p.follow)}
	}

	if p.max != nil {
		values["max"] = []string{fmt.Sprintf("%v", *p.max)}
	}

	if p.query != nil {
		for k, v := range p.query {
			values[k] = []string{v}
		}
	}
	if len(values) > 0 {
		rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode())
	}
	req, err := http.NewRequest("GET", rawURL, nil)
	if err != nil {
		return nil, err
	}

	// Connection-wide headers first, then per-request headers.
	for hk, hv := range p.AffinityGroupHostsService.connection.headers {
		req.Header.Add(hk, hv)
	}

	if p.header != nil {
		for hk, hv := range p.header {
			req.Header.Add(hk, hv)
		}
	}

	req.Header.Add("User-Agent", fmt.Sprintf("GoSDK/%s", SDK_VERSION))
	req.Header.Add("Version", "4")
	req.Header.Add("Content-Type", "application/xml")
	req.Header.Add("Accept", "application/xml")
	// get OAuth access token
	token, err := p.AffinityGroupHostsService.connection.authenticate()
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
	// Send the request and wait for the response
	resp, err := p.AffinityGroupHostsService.connection.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// When a log function is configured, dump the full request and response
	// for debugging.
	if p.AffinityGroupHostsService.connection.logFunc != nil {
		dumpReq, err := httputil.DumpRequestOut(req, true)
		if err != nil {
			return nil, err
		}
		dumpResp, err := httputil.DumpResponse(resp, true)
		if err != nil {
			return nil, err
		}
		p.AffinityGroupHostsService.connection.logFunc("<<<<<<Request:\n%sResponse:\n%s>>>>>>\n", string(dumpReq), string(dumpResp))
	}
	respBodyBytes, errReadBody := ioutil.ReadAll(resp.Body)
	if errReadBody != nil {
		return nil, errReadBody
	}
	// Only 200 is a success for a list operation.
	if !Contains(resp.StatusCode, []int{200}) {
		return nil, CheckFault(respBodyBytes, resp)
	}
	reader := NewXMLReader(respBodyBytes)
	result, err := XMLHostReadMany(reader, nil)
	if err != nil {
		return nil, err
	}
	return &AffinityGroupHostsServiceListResponse{hosts: result}, nil
}

// MustSend is like Send but panics on any error.
func (p *AffinityGroupHostsServiceListRequest) MustSend() *AffinityGroupHostsServiceListResponse {
	if v, err := p.Send(); err != nil {
		panic(err)
	} else {
		return v
	}
}

// List all hosts assigned to this affinity group.
// The order of the returned hosts isn't guaranteed.
type AffinityGroupHostsServiceListResponse struct {
	hosts *HostSlice // hosts returned by the engine; nil if absent
}

// Hosts returns the host list from the response and whether it was present.
func (p *AffinityGroupHostsServiceListResponse) Hosts() (*HostSlice, bool) {
	if p.hosts != nil {
		return p.hosts, true
	}
	return nil, false
}

// MustHosts returns the host list from the response, panicking if absent.
func (p *AffinityGroupHostsServiceListResponse) MustHosts() *HostSlice {
	if p.hosts == nil {
		panic("hosts in response does not exist")
	}
	return p.hosts
}

// List all hosts assigned to this affinity group.
// The order of the returned hosts isn't guaranteed. func (p *AffinityGroupHostsService) List() *AffinityGroupHostsServiceListRequest { return &AffinityGroupHostsServiceListRequest{AffinityGroupHostsService: p} } // Access the service that manages the host assignment to this affinity group. func (op *AffinityGroupHostsService) HostService(id string) *AffinityGroupHostService { return NewAffinityGroupHostService(op.connection, fmt.Sprintf("%s/%s", op.path, id)) } // Service locator method, returns individual service on which the URI is dispatched. func (op *AffinityGroupHostsService) Service(path string) (Service, error) { if path == "" { return op, nil } index := strings.Index(path, "/") if index == -1 { return op.HostService(path), nil } return op.HostService(path[:index]).Service(path[index+1:]) } func (op *AffinityGroupHostsService) String() string { return fmt.Sprintf("AffinityGroupHostsService:%s", op.path) } // This service manages a single affinity group. type AffinityGroupService struct { BaseService } func NewAffinityGroupService(connection *Connection, path string) *AffinityGroupService { var result AffinityGroupService result.connection = connection result.path = path return &result } // Retrieve the affinity group details. 
// [source,xml] // ---- // <affinity_group id="00000000-0000-0000-0000-000000000000"> // // <name>AF_GROUP_001</name> // <cluster id="00000000-0000-0000-0000-000000000000"/> // <positive>true</positive> // <enforcing>true</enforcing> // // </affinity_group> // ---- type AffinityGroupServiceGetRequest struct { AffinityGroupService *AffinityGroupService header map[string]string query map[string]string follow *string } func (p *AffinityGroupServiceGetRequest) Header(key, value string) *AffinityGroupServiceGetRequest { if p.header == nil { p.header = make(map[string]string) } p.header[key] = value return p } func (p *AffinityGroupServiceGetRequest) Query(key, value string) *AffinityGroupServiceGetRequest { if p.query == nil { p.query = make(map[string]string) } p.query[key] = value return p } func (p *AffinityGroupServiceGetRequest) Follow(follow string) *AffinityGroupServiceGetRequest { p.follow = &follow return p } func (p *AffinityGroupServiceGetRequest) Send() (*AffinityGroupServiceGetResponse, error) { rawURL := fmt.Sprintf("%s%s", p.AffinityGroupService.connection.URL(), p.AffinityGroupService.path) values := make(url.Values) if p.follow != nil { values["follow"] = []string{fmt.Sprintf("%v", *p.follow)} } if p.query != nil { for k, v := range p.query { values[k] = []string{v} } } if len(values) > 0 { rawURL = fmt.Sprintf("%s?%s", rawURL, values.Encode()) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/ovirt/go-ovirt/version.go
vendor/github.com/ovirt/go-ovirt/version.go
// Copyright (c) 2017 Joey <majunjiev@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ovirtsdk // The version of the SDK: var SDK_VERSION = "4.4.3"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/merge.go
vendor/github.com/google/pprof/profile/merge.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profile import ( "encoding/binary" "fmt" "sort" "strconv" "strings" ) // Compact performs garbage collection on a profile to remove any // unreferenced fields. This is useful to reduce the size of a profile // after samples or locations have been removed. func (p *Profile) Compact() *Profile { p, _ = Merge([]*Profile{p}) return p } // Merge merges all the profiles in profs into a single Profile. // Returns a new profile independent of the input profiles. The merged // profile is compacted to eliminate unused samples, locations, // functions and mappings. Profiles must have identical profile sample // and period types or the merge will fail. profile.Period of the // resulting profile will be the maximum of all profiles, and // profile.TimeNanos will be the earliest nonzero one. Merges are // associative with the caveat of the first profile having some // specialization in how headers are combined. There may be other // subtleties now or in the future regarding associativity. 
func Merge(srcs []*Profile) (*Profile, error) { if len(srcs) == 0 { return nil, fmt.Errorf("no profiles to merge") } p, err := combineHeaders(srcs) if err != nil { return nil, err } pm := &profileMerger{ p: p, samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), locations: make(map[locationKey]*Location, len(srcs[0].Location)), functions: make(map[functionKey]*Function, len(srcs[0].Function)), mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), } for _, src := range srcs { // Clear the profile-specific hash tables pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) if len(pm.mappings) == 0 && len(src.Mapping) > 0 { // The Mapping list has the property that the first mapping // represents the main binary. Take the first Mapping we see, // otherwise the operations below will add mappings in an // arbitrary order. pm.mapMapping(src.Mapping[0]) } for _, s := range src.Sample { if !isZeroSample(s) { pm.mapSample(s) } } } for _, s := range p.Sample { if isZeroSample(s) { // If there are any zero samples, re-merge the profile to GC // them. return Merge([]*Profile{p}) } } return p, nil } // Normalize normalizes the source profile by multiplying each value in profile by the // ratio of the sum of the base profile's values of that sample type to the sum of the // source profile's value of that sample type. 
func (p *Profile) Normalize(pb *Profile) error {

	if err := p.compatible(pb); err != nil {
		return err
	}

	// Per-sample-type totals for the base profile.
	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	// Per-sample-type totals for this profile.
	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	// Scale factor per sample type; a zero source total yields a zero scale
	// (avoids division by zero).
	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}

// isZeroSample reports whether every value of the sample is zero.
func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

// profileMerger holds the state for merging several profiles into one.
type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID locationIDMap
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

// mapInfo pairs a merged mapping with the address offset relative to the
// source mapping it came from.
type mapInfo struct {
	m      *Mapping
	offset int64
}

// mapSample folds src into the merged profile, accumulating values into an
// existing equivalent sample if one was seen before.
func (pm *profileMerger) mapSample(src *Sample) *Sample {
	// Check memoization table
	k := pm.sampleKey(src)
	if ss, ok := pm.samples[k]; ok {
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}

	// Make new sample.
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	// Deep-copy labels so the merged profile is independent of the sources.
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

// sampleKey builds a canonical byte-string key for a sample from its mapped
// location IDs and its (sorted) label sets.
func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
	// Accumulate contents into a string.
	var buf strings.Builder
	buf.Grow(64) // Heuristic to avoid extra allocs

	// encode a number
	putNumber := func(v uint64) {
		var num [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(num[:], v)
		buf.Write(num[:n])
	}

	// encode a string prefixed with its length.
	putDelimitedString := func(s string) {
		putNumber(uint64(len(s)))
		buf.WriteString(s)
	}

	for _, l := range sample.Location {
		// Get the location in the merged profile, which may have a different ID.
		if loc := pm.mapLocation(l); loc != nil {
			putNumber(loc.ID)
		}
	}
	putNumber(0) // Delimiter

	// Labels are emitted in sorted key order so the key is deterministic.
	for _, l := range sortedKeys1(sample.Label) {
		putDelimitedString(l)
		values := sample.Label[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putDelimitedString(v)
		}
	}

	for _, l := range sortedKeys2(sample.NumLabel) {
		putDelimitedString(l)
		values := sample.NumLabel[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putNumber(uint64(v))
		}
		units := sample.NumUnit[l]
		putNumber(uint64(len(units)))
		for _, v := range units {
			putDelimitedString(v)
		}
	}

	return sampleKey(buf.String())
}

type sampleKey string

// sortedKeys1 returns the sorted keys found in a string->[]string map.
// // Note: this is currently non-generic since github pprof runs golint, // which does not support generics. When that issue is fixed, it can // be merged with sortedKeys2 and made into a generic function. func sortedKeys1(m map[string][]string) []string { if len(m) == 0 { return nil } keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } sort.Strings(keys) return keys } // sortedKeys2 returns the sorted keys found in a string->[]int64 map. // // Note: this is currently non-generic since github pprof runs golint, // which does not support generics. When that issue is fixed, it can // be merged with sortedKeys1 and made into a generic function. func sortedKeys2(m map[string][]int64) []string { if len(m) == 0 { return nil } keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } sort.Strings(keys) return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { if src == nil { return nil } if l := pm.locationsByID.get(src.ID); l != nil { return l } mi := pm.mapMapping(src.Mapping) l := &Location{ ID: uint64(len(pm.p.Location) + 1), Mapping: mi.m, Address: uint64(int64(src.Address) + mi.offset), Line: make([]Line, len(src.Line)), IsFolded: src.IsFolded, } for i, ln := range src.Line { l.Line[i] = pm.mapLine(ln) } // Check memoization table. Must be done on the remapped location to // account for the remapped mapping ID. k := l.key() if ll, ok := pm.locations[k]; ok { pm.locationsByID.set(src.ID, ll) return ll } pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l } // key generates locationKey to be used as a key for maps. func (l *Location) key() locationKey { key := locationKey{ addr: l.Address, isFolded: l.IsFolded, } if l.Mapping != nil { // Normalizes address to handle address space randomization. 
key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key } type locationKey struct { addr, mappingID uint64 lines string isFolded bool } func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { if src == nil { return mapInfo{} } if mi, ok := pm.mappingsByID[src.ID]; ok { return mi } // Check memoization tables. mk := src.key() if m, ok := pm.mappings[mk]; ok { mi := mapInfo{m, int64(m.Start) - int64(src.Start)} pm.mappingsByID[src.ID] = mi return mi } m := &Mapping{ ID: uint64(len(pm.p.Mapping) + 1), Start: src.Start, Limit: src.Limit, Offset: src.Offset, File: src.File, KernelRelocationSymbol: src.KernelRelocationSymbol, BuildID: src.BuildID, HasFunctions: src.HasFunctions, HasFilenames: src.HasFilenames, HasLineNumbers: src.HasLineNumbers, HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) // Update memoization tables. pm.mappings[mk] = m mi := mapInfo{m, 0} pm.mappingsByID[src.ID] = mi return mi } // key generates encoded strings of Mapping to be used as a key for // maps. func (m *Mapping) key() mappingKey { // Normalize addresses to handle address space randomization. // Round up to next 4K boundary to avoid minor discrepancies. const mapsizeRounding = 0x1000 size := m.Limit - m.Start size = size + mapsizeRounding - 1 size = size - (size % mapsizeRounding) key := mappingKey{ size: size, offset: m.Offset, } switch { case m.BuildID != "": key.buildIDOrFile = m.BuildID case m.File != "": key.buildIDOrFile = m.File default: // A mapping containing neither build ID nor file name is a fake mapping. A // key with empty buildIDOrFile is used for fake mappings so that they are // treated as the same mapping during merging. 
} return key } type mappingKey struct { size, offset uint64 buildIDOrFile string } func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, Column: src.Column, } return ln } func (pm *profileMerger) mapFunction(src *Function) *Function { if src == nil { return nil } if f, ok := pm.functionsByID[src.ID]; ok { return f } k := src.key() if f, ok := pm.functions[k]; ok { pm.functionsByID[src.ID] = f return f } f := &Function{ ID: uint64(len(pm.p.Function) + 1), Name: src.Name, SystemName: src.SystemName, Filename: src.Filename, StartLine: src.StartLine, } pm.functions[k] = f pm.functionsByID[src.ID] = f pm.p.Function = append(pm.p.Function, f) return f } // key generates a struct to be used as a key for maps. func (f *Function) key() functionKey { return functionKey{ f.StartLine, f.Name, f.SystemName, f.Filename, } } type functionKey struct { startLine int64 name, systemName, fileName string } // combineHeaders checks that all profiles can be merged and returns // their combined profile. 
func combineHeaders(srcs []*Profile) (*Profile, error) { for _, s := range srcs[1:] { if err := srcs[0].compatible(s); err != nil { return nil, err } } var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { timeNanos = s.TimeNanos } durationNanos += s.DurationNanos if period == 0 || period < s.Period { period = s.Period } for _, c := range s.Comments { if seen := seenComments[c]; !seen { comments = append(comments, c) seenComments[c] = true } } if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } if docURL == "" { docURL = s.DocURL } } p := &Profile{ SampleType: make([]*ValueType, len(srcs[0].SampleType)), DropFrames: srcs[0].DropFrames, KeepFrames: srcs[0].KeepFrames, TimeNanos: timeNanos, DurationNanos: durationNanos, PeriodType: srcs[0].PeriodType, Period: period, Comments: comments, DefaultSampleType: defaultSampleType, DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil } // compatible determines if two profiles can be compared/merged. // returns nil if the profiles are compatible; otherwise an error with // details on the incompatibility. func (p *Profile) compatible(pb *Profile) error { if !equalValueType(p.PeriodType, pb.PeriodType) { return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) } if len(p.SampleType) != len(pb.SampleType) { return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) } for i := range p.SampleType { if !equalValueType(p.SampleType[i], pb.SampleType[i]) { return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) } } return nil } // equalValueType returns true if the two value types are semantically // equal. It ignores the internal fields used during encode/decode. 
func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } // locationIDMap is like a map[uint64]*Location, but provides efficiency for // ids that are densely numbered, which is often the case. type locationIDMap struct { dense []*Location // indexed by id for id < len(dense) sparse map[uint64]*Location // indexed by id for id >= len(dense) } func makeLocationIDMap(n int) locationIDMap { return locationIDMap{ dense: make([]*Location, n), sparse: map[uint64]*Location{}, } } func (lm locationIDMap) get(id uint64) *Location { if id < uint64(len(lm.dense)) { return lm.dense[int(id)] } return lm.sparse[id] } func (lm locationIDMap) set(id uint64, loc *Location) { if id < uint64(len(lm.dense)) { lm.dense[id] = loc return } lm.sparse[id] = loc } // CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It // keeps sample types that appear in all profiles only and drops/reorders the // sample types as necessary. // // In the case of sample types order is not the same for given profiles the // order is derived from the first profile. // // Profiles are modified in-place. // // It returns an error if the sample type's intersection is empty. func CompatibilizeSampleTypes(ps []*Profile) error { sTypes := commonSampleTypes(ps) if len(sTypes) == 0 { return fmt.Errorf("profiles have empty common sample type list") } for _, p := range ps { if err := compatibilizeSampleTypes(p, sTypes); err != nil { return err } } return nil } // commonSampleTypes returns sample types that appear in all profiles in the // order how they ordered in the first profile. 
func commonSampleTypes(ps []*Profile) []string { if len(ps) == 0 { return nil } sTypes := map[string]int{} for _, p := range ps { for _, st := range p.SampleType { sTypes[st.Type]++ } } var res []string for _, st := range ps[0].SampleType { if sTypes[st.Type] == len(ps) { res = append(res, st.Type) } } return res } // compatibilizeSampleTypes drops sample types that are not present in sTypes // list and reorder them if needed. // // It sets DefaultSampleType to sType[0] if it is not in sType list. // // It assumes that all sample types from the sTypes list are present in the // given profile otherwise it returns an error. func compatibilizeSampleTypes(p *Profile, sTypes []string) error { if len(sTypes) == 0 { return fmt.Errorf("sample type list is empty") } defaultSampleType := sTypes[0] reMap, needToModify := make([]int, len(sTypes)), false for i, st := range sTypes { if st == p.DefaultSampleType { defaultSampleType = p.DefaultSampleType } idx := searchValueType(p.SampleType, st) if idx < 0 { return fmt.Errorf("%q sample type is not found in profile", st) } reMap[i] = idx if idx != i { needToModify = true } } if !needToModify && len(sTypes) == len(p.SampleType) { return nil } p.DefaultSampleType = defaultSampleType oldSampleTypes := p.SampleType p.SampleType = make([]*ValueType, len(sTypes)) for i, idx := range reMap { p.SampleType[i] = oldSampleTypes[idx] } values := make([]int64, len(sTypes)) for _, s := range p.Sample { for i, idx := range reMap { values[i] = s.Value[idx] } s.Value = s.Value[:len(values)] copy(s.Value, values) } return nil } func searchValueType(vts []*ValueType, s string) int { for i, vt := range vts { if vt.Type == s { return i } } return -1 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/index.go
vendor/github.com/google/pprof/profile/index.go
// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profile import ( "fmt" "strconv" "strings" ) // SampleIndexByName returns the appropriate index for a value of sample index. // If numeric, it returns the number, otherwise it looks up the text in the // profile sample types. func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { if sampleIndex == "" { if dst := p.DefaultSampleType; dst != "" { for i, t := range sampleTypes(p) { if t == dst { return i, nil } } } // By default select the last sample value return len(p.SampleType) - 1, nil } if i, err := strconv.Atoi(sampleIndex); err == nil { if i < 0 || i >= len(p.SampleType) { return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) } return i, nil } // Remove the inuse_ prefix to support legacy pprof options // "inuse_space" and "inuse_objects" for profiles containing types // "space" and "objects". noInuse := strings.TrimPrefix(sampleIndex, "inuse_") for i, t := range p.SampleType { if t.Type == sampleIndex || t.Type == noInuse { return i, nil } } return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) } func sampleTypes(p *Profile) []string { types := make([]string, len(p.SampleType)) for i, t := range p.SampleType { types[i] = t.Type } return types }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/profile.go
vendor/github.com/google/pprof/profile/profile.go
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package profile provides a representation of profile.proto and
// methods to encode/decode profiles in this format.
package profile

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"math"
	"path/filepath"
	"regexp"
	"sort"
	"strings"
	"sync"
	"time"
)

// Profile is an in-memory representation of profile.proto.
type Profile struct {
	SampleType        []*ValueType
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string
	DocURL            string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	commentX           []int64
	docURLX            int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64
	// Label is a per-label-key map to values for string labels.
	//
	// In general, having multiple values for the given label key is strongly
	// discouraged - see docs for the sample label field in profile.proto. The
	// main reason this unlikely state is tracked here is to make the
	// decoding->encoding roundtrip not lossy. But we expect that the value
	// slices present in this map are always of length 1.
	Label map[string][]string
	// NumLabel is a per-label-key map to values for numeric labels. See a note
	// above on handling multiple values for a label.
	NumLabel map[string][]int64
	// NumUnit is a per-label-key map to the unit names of corresponding numeric
	// label values. The unit info may be missing even if the label is in
	// NumLabel, see the docs in profile.proto for details. When the value is
	// slice is present and not nil, its length must be equal to the length of
	// the corresponding value slice in NumLabel.
	NumUnit map[string][]string

	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label
type label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
	// can be set if numX has value
	unitX int64
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	fileX    int64
	buildIDX int64

	// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
	// For linux kernel mappings generated by some tools, correct symbolization depends
	// on knowing which of the two possible relocation symbols was used for `Start`.
	// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
	//
	// Note, this public field is not persisted in the proto. For the purposes of
	// copying / merging / hashing profiles, it is considered subsumed by `File`.
	KernelRelocationSymbol string
}

// Location corresponds to Profile.Location
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64
	Column   int64

	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	nameX       int64
	systemNameX int64
	filenameX   int64
}

// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
	data, err := io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return ParseData(data)
}

// ParseData parses a profile from a buffer and checks for its
// validity.
func ParseData(data []byte) (*Profile, error) {
	var p *Profile
	var err error
	// A gzip magic number means the payload must be decompressed first.
	if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
		gz, err := gzip.NewReader(bytes.NewBuffer(data))
		if err == nil {
			data, err = io.ReadAll(gz)
		}
		if err != nil {
			return nil, fmt.Errorf("decompressing profile: %v", err)
		}
	}
	// Try the protobuf format first; fall back to legacy text formats.
	if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
		p, err = parseLegacy(data)
	}

	if err != nil {
		return nil, fmt.Errorf("parsing profile: %v", err)
	}

	if err := p.CheckValid(); err != nil {
		return nil, fmt.Errorf("malformed profile: %v", err)
	}
	return p, nil
}

var errUnrecognized = fmt.Errorf("unrecognized profile format")
var errMalformed = fmt.Errorf("malformed profile format")
var errNoData = fmt.Errorf("empty input file")
var errConcatProfile = fmt.Errorf("concatenated profiles detected")

// parseLegacy tries each legacy parser in turn until one recognizes the data.
func parseLegacy(data []byte) (*Profile, error) {
	parsers := []func([]byte) (*Profile, error){
		parseCPU,
		parseHeap,
		parseGoCount, // goroutine, threadcreate
		parseThread,
		parseContention,
		parseJavaProfile,
	}

	for _, parser := range parsers {
		p, err := parser(data)
		if err == nil {
			p.addLegacyFrameInfo()
			return p, nil
		}
		if err != errUnrecognized {
			return nil, err
		}
	}
	return nil, errUnrecognized
}

// ParseUncompressed parses an uncompressed protobuf into a profile.
func ParseUncompressed(data []byte) (*Profile, error) {
	if len(data) == 0 {
		return nil, errNoData
	}
	p := &Profile{}
	if err := unmarshal(data, p); err != nil {
		return nil, err
	}

	if err := p.postDecode(); err != nil {
		return nil, err
	}

	return p, nil
}

var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)

// massageMappings applies heuristic-based changes to the profile
// mappings to account for quirks of some environments.
func (p *Profile) massageMappings() {
	// Merge adjacent regions with matching names, checking that the offsets match
	if len(p.Mapping) > 1 {
		mappings := []*Mapping{p.Mapping[0]}
		for _, m := range p.Mapping[1:] {
			lm := mappings[len(mappings)-1]
			if adjacent(lm, m) {
				lm.Limit = m.Limit
				if m.File != "" {
					lm.File = m.File
				}
				if m.BuildID != "" {
					lm.BuildID = m.BuildID
				}
				p.updateLocationMapping(m, lm)
				continue
			}
			mappings = append(mappings, m)
		}
		p.Mapping = mappings
	}

	// Use heuristics to identify main binary and move it to the top of the list of mappings
	for i, m := range p.Mapping {
		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
		if len(file) == 0 {
			continue
		}
		if len(libRx.FindStringSubmatch(file)) > 0 {
			continue
		}
		if file[0] == '[' {
			continue
		}
		// Swap what we guess is main to position 0.
		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
		break
	}

	// Keep the mapping IDs neatly sorted
	for i, m := range p.Mapping {
		m.ID = uint64(i + 1)
	}
}

// adjacent returns whether two mapping entries represent the same
// mapping that has been split into two. Check that their addresses are adjacent,
// and if the offsets match, if they are available.
func adjacent(m1, m2 *Mapping) bool { if m1.File != "" && m2.File != "" { if m1.File != m2.File { return false } } if m1.BuildID != "" && m2.BuildID != "" { if m1.BuildID != m2.BuildID { return false } } if m1.Limit != m2.Start { return false } if m1.Offset != 0 && m2.Offset != 0 { offset := m1.Offset + (m1.Limit - m1.Start) if offset != m2.Offset { return false } } return true } func (p *Profile) updateLocationMapping(from, to *Mapping) { for _, l := range p.Location { if l.Mapping == from { l.Mapping = to } } } func serialize(p *Profile) []byte { p.encodeMu.Lock() p.preEncode() b := marshal(p) p.encodeMu.Unlock() return b } // Write writes the profile as a gzip-compressed marshaled protobuf. func (p *Profile) Write(w io.Writer) error { zw := gzip.NewWriter(w) defer zw.Close() _, err := zw.Write(serialize(p)) return err } // WriteUncompressed writes the profile as a marshaled protobuf. func (p *Profile) WriteUncompressed(w io.Writer) error { _, err := w.Write(serialize(p)) return err } // CheckValid tests whether the profile is valid. Checks include, but are // not limited to: // - len(Profile.Sample[n].value) == len(Profile.value_unit) // - Sample.id has a corresponding Profile.Location func (p *Profile) CheckValid() error { // Check that sample values are consistent sampleLen := len(p.SampleType) if sampleLen == 0 && len(p.Sample) != 0 { return fmt.Errorf("missing sample type information") } for _, s := range p.Sample { if s == nil { return fmt.Errorf("profile has nil sample") } if len(s.Value) != sampleLen { return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) } for _, l := range s.Location { if l == nil { return fmt.Errorf("sample has nil location") } } } // Check that all mappings/locations/functions are in the tables // Check that there are no duplicate ids mappings := make(map[uint64]*Mapping, len(p.Mapping)) for _, m := range p.Mapping { if m == nil { return fmt.Errorf("profile has nil mapping") } if m.ID == 0 { return fmt.Errorf("found mapping with reserved ID=0") } if mappings[m.ID] != nil { return fmt.Errorf("multiple mappings with same id: %d", m.ID) } mappings[m.ID] = m } functions := make(map[uint64]*Function, len(p.Function)) for _, f := range p.Function { if f == nil { return fmt.Errorf("profile has nil function") } if f.ID == 0 { return fmt.Errorf("found function with reserved ID=0") } if functions[f.ID] != nil { return fmt.Errorf("multiple functions with same id: %d", f.ID) } functions[f.ID] = f } locations := make(map[uint64]*Location, len(p.Location)) for _, l := range p.Location { if l == nil { return fmt.Errorf("profile has nil location") } if l.ID == 0 { return fmt.Errorf("found location with reserved id=0") } if locations[l.ID] != nil { return fmt.Errorf("multiple locations with same id: %d", l.ID) } locations[l.ID] = l if m := l.Mapping; m != nil { if m.ID == 0 || mappings[m.ID] != m { return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) } } for _, ln := range l.Line { f := ln.Function if f == nil { return fmt.Errorf("location id: %d has a line with nil function", l.ID) } if f.ID == 0 || functions[f.ID] != f { return fmt.Errorf("inconsistent function %p: %d", f, f.ID) } } } return nil } // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. 
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function m.HasFilenames = m.HasFilenames && filename m.HasLineNumbers = m.HasLineNumbers && linenumber } // Aggregate functions if !function || !filename { for _, f := range p.Function { if !function { f.Name = "" f.SystemName = "" } if !filename { f.Filename = "" } } } // Aggregate locations if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] } if !linenumber { for i := range l.Line { l.Line[i].Line = 0 l.Line[i].Column = 0 } } if !columnnumber { for i := range l.Line { l.Line[i].Column = 0 } } if !address { l.Address = 0 } } } return p.CheckValid() } // NumLabelUnits returns a map of numeric label keys to the units // associated with those keys and a map of those keys to any units // that were encountered but not used. // Unit for a given key is the first encountered unit for that key. If multiple // units are encountered for values paired with a particular key, then the first // unit encountered is used and all other units are returned in sorted order // in map of ignored units. // If no units are encountered for a particular key, the unit is then inferred // based on the key. func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { numLabelUnits := map[string]string{} ignoredUnits := map[string]map[string]bool{} encounteredKeys := map[string]bool{} // Determine units based on numeric tags for each sample. 
for _, s := range p.Sample { for k := range s.NumLabel { encounteredKeys[k] = true for _, unit := range s.NumUnit[k] { if unit == "" { continue } if wantUnit, ok := numLabelUnits[k]; !ok { numLabelUnits[k] = unit } else if wantUnit != unit { if v, ok := ignoredUnits[k]; ok { v[unit] = true } else { ignoredUnits[k] = map[string]bool{unit: true} } } } } } // Infer units for keys without any units associated with // numeric tag values. for key := range encounteredKeys { unit := numLabelUnits[key] if unit == "" { switch key { case "alignment", "request": numLabelUnits[key] = "bytes" default: numLabelUnits[key] = key } } } // Copy ignored units into more readable format unitsIgnored := make(map[string][]string, len(ignoredUnits)) for key, values := range ignoredUnits { units := make([]string, len(values)) i := 0 for unit := range values { units[i] = unit i++ } sort.Strings(units) unitsIgnored[key] = units } return numLabelUnits, unitsIgnored } // String dumps a text representation of a profile. Intended mainly // for debugging purposes. 
func (p *Profile) String() string { ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } if url := p.DocURL; url != "" { ss = append(ss, fmt.Sprintf("Doc: %s", url)) } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) if p.TimeNanos != 0 { ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) } if p.DurationNanos != 0 { ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) } ss = append(ss, "Samples:") var sh1 string for _, s := range p.SampleType { dflt := "" if s.Type == p.DefaultSampleType { dflt = "[dflt]" } sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) } ss = append(ss, strings.TrimSpace(sh1)) for _, s := range p.Sample { ss = append(ss, s.string()) } ss = append(ss, "Locations") for _, l := range p.Location { ss = append(ss, l.string()) } ss = append(ss, "Mappings") for _, m := range p.Mapping { ss = append(ss, m.string()) } return strings.Join(ss, "\n") + "\n" } // string dumps a text representation of a mapping. Intended mainly // for debugging purposes. func (m *Mapping) string() string { bits := "" if m.HasFunctions { bits = bits + "[FN]" } if m.HasFilenames { bits = bits + "[FL]" } if m.HasLineNumbers { bits = bits + "[LN]" } if m.HasInlineFrames { bits = bits + "[IN]" } return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", m.ID, m.Start, m.Limit, m.Offset, m.File, m.BuildID, bits) } // string dumps a text representation of a location. Intended mainly // for debugging purposes. func (l *Location) string() string { ss := []string{} locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) if m := l.Mapping; m != nil { locStr = locStr + fmt.Sprintf("M=%d ", m.ID) } if l.IsFolded { locStr = locStr + "[F] " } if len(l.Line) == 0 { ss = append(ss, locStr) } for li := range l.Line { lnStr := "??" 
if fn := l.Line[li].Function; fn != nil { lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" } } ss = append(ss, locStr+lnStr) // Do not print location details past the first line locStr = " " } return strings.Join(ss, "\n") } // string dumps a text representation of a sample. Intended mainly // for debugging purposes. func (s *Sample) string() string { ss := []string{} var sv string for _, v := range s.Value { sv = fmt.Sprintf("%s %10d", sv, v) } sv = sv + ": " for _, l := range s.Location { sv = sv + fmt.Sprintf("%d ", l.ID) } ss = append(ss, sv) const labelHeader = " " if len(s.Label) > 0 { ss = append(ss, labelHeader+labelsToString(s.Label)) } if len(s.NumLabel) > 0 { ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) } return strings.Join(ss, "\n") } // labelsToString returns a string representation of a // map representing labels. func labelsToString(labels map[string][]string) string { ls := []string{} for k, v := range labels { ls = append(ls, fmt.Sprintf("%s:%v", k, v)) } sort.Strings(ls) return strings.Join(ls, " ") } // numLabelsToString returns a string representation of a map // representing numeric labels. func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { ls := []string{} for k, v := range numLabels { units := numUnits[k] var labelString string if len(units) == len(v) { values := make([]string, len(v)) for i, vv := range v { values[i] = fmt.Sprintf("%d %s", vv, units[i]) } labelString = fmt.Sprintf("%s:%v", k, values) } else { labelString = fmt.Sprintf("%s:%v", k, v) } ls = append(ls, labelString) } sort.Strings(ls) return strings.Join(ls, " ") } // SetLabel sets the specified key to the specified value for all samples in the // profile. 
func (p *Profile) SetLabel(key string, value []string) { for _, sample := range p.Sample { if sample.Label == nil { sample.Label = map[string][]string{key: value} } else { sample.Label[key] = value } } } // RemoveLabel removes all labels associated with the specified key for all // samples in the profile. func (p *Profile) RemoveLabel(key string) { for _, sample := range p.Sample { delete(sample.Label, key) } } // HasLabel returns true if a sample has a label with indicated key and value. func (s *Sample) HasLabel(key, value string) bool { for _, v := range s.Label[key] { if v == value { return true } } return false } // SetNumLabel sets the specified key to the specified value for all samples in the // profile. "unit" is a slice that describes the units that each corresponding member // of "values" is measured in (e.g. bytes or seconds). If there is no relevant // unit for a given value, that member of "unit" should be the empty string. // "unit" must either have the same length as "value", or be nil. func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { for _, sample := range p.Sample { if sample.NumLabel == nil { sample.NumLabel = map[string][]int64{key: value} } else { sample.NumLabel[key] = value } if sample.NumUnit == nil { sample.NumUnit = map[string][]string{key: unit} } else { sample.NumUnit[key] = unit } } } // RemoveNumLabel removes all numerical labels associated with the specified key for all // samples in the profile. func (p *Profile) RemoveNumLabel(key string) { for _, sample := range p.Sample { delete(sample.NumLabel, key) delete(sample.NumUnit, key) } } // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. func (s *Sample) DiffBaseSample() bool { return s.HasLabel("pprof::base", "true") } // Scale multiplies all sample values in a profile by a constant and keeps // only samples that have at least one non-zero value. 
func (p *Profile) Scale(ratio float64) { if ratio == 1 { return } ratios := make([]float64, len(p.SampleType)) for i := range p.SampleType { ratios[i] = ratio } p.ScaleN(ratios) } // ScaleN multiplies each sample values in a sample by a different amount // and keeps only samples that have at least one non-zero value. func (p *Profile) ScaleN(ratios []float64) error { if len(p.SampleType) != len(ratios) { return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) } allOnes := true for _, r := range ratios { if r != 1 { allOnes = false break } } if allOnes { return nil } fillIdx := 0 for _, s := range p.Sample { keepSample := false for i, v := range s.Value { if ratios[i] != 1 { val := int64(math.Round(float64(v) * ratios[i])) s.Value[i] = val keepSample = keepSample || val != 0 } } if keepSample { p.Sample[fillIdx] = s fillIdx++ } } p.Sample = p.Sample[:fillIdx] return nil } // HasFunctions determines if all locations in this profile have // symbolized function information. func (p *Profile) HasFunctions() bool { for _, l := range p.Location { if l.Mapping != nil && !l.Mapping.HasFunctions { return false } } return true } // HasFileLines determines if all locations in this profile have // symbolized file and line number information. func (p *Profile) HasFileLines() bool { for _, l := range p.Location { if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { return false } } return true } // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are // "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" } // Copy makes a fully independent copy of a profile. 
func (p *Profile) Copy() *Profile { pp := &Profile{} if err := unmarshal(serialize(p), pp); err != nil { panic(err) } if err := pp.postDecode(); err != nil { panic(err) } return pp }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/filter.go
vendor/github.com/google/pprof/profile/filter.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profile // Implements methods to filter samples from profiles. import "regexp" // FilterSamplesByName filters the samples in a profile and only keeps // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { if focus == nil && ignore == nil && hide == nil && show == nil { fm = true // Missing focus implies a match return } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { if ignore != nil && l.matchesName(ignore) { im = true focusOrIgnore[l.ID] = false } else if focus == nil || l.matchesName(focus) { fm = true focusOrIgnore[l.ID] = true } if hide != nil && l.matchesName(hide) { hm = true l.Line = l.unmatchedLines(hide) if len(l.Line) == 0 { hidden[l.ID] = true } } if show != nil { l.Line = l.matchedLines(show) if len(l.Line) == 0 { hidden[l.ID] = true } else { hnm = true } } } s := make([]*Sample, 0, len(p.Sample)) for _, sample := range p.Sample { if focusedAndNotIgnored(sample.Location, focusOrIgnore) { if len(hidden) > 0 { var locs []*Location for _, loc := range sample.Location { if !hidden[loc.ID] { locs = append(locs, loc) } } if len(locs) == 0 { // Remove sample with no locations (by not adding it to s). 
continue } sample.Location = locs } s = append(s, sample) } } p.Sample = s return } // ShowFrom drops all stack frames above the highest matching frame and returns // whether a match was found. If showFrom is nil it returns false and does not // modify the profile. // // Example: consider a sample with frames [A, B, C, B], where A is the root. // ShowFrom(nil) returns false and has frames [A, B, C, B]. // ShowFrom(A) returns true and has frames [A, B, C, B]. // ShowFrom(B) returns true and has frames [B, C, B]. // ShowFrom(C) returns true and has frames [C, B]. // ShowFrom(D) returns false and drops the sample because no frames remain. func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { if showFrom == nil { return false } // showFromLocs stores location IDs that matched ShowFrom. showFromLocs := make(map[uint64]bool) // Apply to locations. for _, loc := range p.Location { if filterShowFromLocation(loc, showFrom) { showFromLocs[loc.ID] = true matched = true } } // For all samples, strip locations after the highest matching one. s := make([]*Sample, 0, len(p.Sample)) for _, sample := range p.Sample { for i := len(sample.Location) - 1; i >= 0; i-- { if showFromLocs[sample.Location[i].ID] { sample.Location = sample.Location[:i+1] s = append(s, sample) break } } } p.Sample = s return matched } // filterShowFromLocation tests a showFrom regex against a location, removes // lines after the last match and returns whether a match was found. If the // mapping is matched, then all lines are kept. func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { return true } if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { loc.Line = loc.Line[:i+1] return true } return false } // lastMatchedLineIndex returns the index of the last line that matches a regex, // or -1 if no match is found. 
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { for i := len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil { if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { return i } } } return -1 } // FilterTagsByName filters the tags in a profile and only keeps // tags that match show and not hide. func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { matchRemove := func(name string) bool { matchShow := show == nil || show.MatchString(name) matchHide := hide != nil && hide.MatchString(name) if matchShow { sm = true } if matchHide { hm = true } return !matchShow || matchHide } for _, s := range p.Sample { for lab := range s.Label { if matchRemove(lab) { delete(s.Label, lab) } } for lab := range s.NumLabel { if matchRemove(lab) { delete(s.NumLabel, lab) } } } return } // matchesName returns whether the location matches the regular // expression. It checks any available function names, file names, and // mapping object filename. func (loc *Location) matchesName(re *regexp.Regexp) bool { for _, ln := range loc.Line { if fn := ln.Function; fn != nil { if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { return true } } } if m := loc.Mapping; m != nil && re.MatchString(m.File) { return true } return false } // unmatchedLines returns the lines in the location that do not match // the regular expression. func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { if m := loc.Mapping; m != nil && re.MatchString(m.File) { return nil } var lines []Line for _, ln := range loc.Line { if fn := ln.Function; fn != nil { if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { continue } } lines = append(lines, ln) } return lines } // matchedLines returns the lines in the location that match // the regular expression. 
func (loc *Location) matchedLines(re *regexp.Regexp) []Line { if m := loc.Mapping; m != nil && re.MatchString(m.File) { return loc.Line } var lines []Line for _, ln := range loc.Line { if fn := ln.Function; fn != nil { if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { continue } } lines = append(lines, ln) } return lines } // focusedAndNotIgnored looks up a slice of ids against a map of // focused/ignored locations. The map only contains locations that are // explicitly focused or ignored. Returns whether there is at least // one focused location but no ignored locations. func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { var f bool for _, loc := range locs { if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { if focus { // Found focused location. Must keep searching in case there // is an ignored one as well. f = true } else { // Found ignored location. Can return false right away. return false } } } return f } // TagMatch selects tags for filtering type TagMatch func(s *Sample) bool // FilterSamplesByTag removes all samples from the profile, except // those that match focus and do not match the ignore regular // expression. func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { samples := make([]*Sample, 0, len(p.Sample)) for _, s := range p.Sample { focused, ignored := true, false if focus != nil { focused = focus(s) } if ignore != nil { ignored = ignore(s) } fm = fm || focused im = im || ignored if focused && !ignored { samples = append(samples, s) } } p.Sample = samples return }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/proto.go
vendor/github.com/google/pprof/profile/proto.go
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file is a simple protocol buffer encoder and decoder.
// The format is described at
// https://developers.google.com/protocol-buffers/docs/encoding
//
// A protocol message must implement the message interface:
//   decoder() []decoder
//   encode(*buffer)
//
// The decode method returns a slice indexed by field number that gives the
// function to decode that field.
// The encode method encodes its receiver into the given buffer.
//
// The two methods are simple enough to be implemented by hand rather than
// by using a protocol compiler.
//
// See profile.go for examples of messages implementing this interface.
//
// There is no support for groups, message sets, or "has" bits.

package profile

import (
	"errors"
	"fmt"
)

// buffer is the shared state for both encoding and decoding. When
// encoding, data accumulates output. When decoding, field/typ/u64/data
// describe the most recently scanned field.
type buffer struct {
	field    int // field tag
	typ      int // proto wire type code for field
	u64      uint64
	data     []byte
	tmp      [16]byte
	tmpLines []Line // temporary storage used while decoding "repeated Line".
}

// decoder decodes one field (already scanned into b) into m.
type decoder func(*buffer, message) error

// message is implemented by every encodable/decodable proto message type.
type message interface {
	decoder() []decoder
	encode(*buffer)
}

// marshal encodes m into a fresh byte slice.
func marshal(m message) []byte {
	var b buffer
	m.encode(&b)
	return b.data
}

// encodeVarint appends x to b.data in base-128 varint encoding,
// 7 bits per byte, least-significant group first.
func encodeVarint(b *buffer, x uint64) {
	for x >= 128 {
		b.data = append(b.data, byte(x)|0x80)
		x >>= 7
	}
	b.data = append(b.data, byte(x))
}

// encodeLength appends the key (tag, wire type 2) and a length prefix.
func encodeLength(b *buffer, tag int, len int) {
	encodeVarint(b, uint64(tag)<<3|2)
	encodeVarint(b, uint64(len))
}

// encodeUint64 appends the key (tag, wire type 0) followed by x.
func encodeUint64(b *buffer, tag int, x uint64) {
	// append varint to b.data
	encodeVarint(b, uint64(tag)<<3)
	encodeVarint(b, x)
}

// encodeUint64s appends a repeated uint64 field; more than two values
// use packed encoding (one length-delimited record).
func encodeUint64s(b *buffer, tag int, x []uint64) {
	if len(x) > 2 {
		// Use packed encoding: encode the payload in place, append the
		// key+length after it, then rotate the payload behind the
		// prefix using tmp as scratch space. The packed payload must
		// therefore fit in tmp... actually tmp holds only the prefix
		// (at most 16 bytes), which is what gets rotated.
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, u)
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeUint64(b, tag, u)
	}
}

// encodeUint64Opt encodes x only when it is non-zero (proto3 default
// values are omitted from the wire).
func encodeUint64Opt(b *buffer, tag int, x uint64) {
	if x == 0 {
		return
	}
	encodeUint64(b, tag, x)
}

// encodeInt64 encodes x with a plain (non-zigzag) uint64 reinterpretation.
func encodeInt64(b *buffer, tag int, x int64) {
	u := uint64(x)
	encodeUint64(b, tag, u)
}

// encodeInt64s appends a repeated int64 field; more than two values
// use packed encoding (same prefix-rotation technique as encodeUint64s).
func encodeInt64s(b *buffer, tag int, x []int64) {
	if len(x) > 2 {
		// Use packed encoding
		n1 := len(b.data)
		for _, u := range x {
			encodeVarint(b, uint64(u))
		}
		n2 := len(b.data)
		encodeLength(b, tag, n2-n1)
		n3 := len(b.data)
		copy(b.tmp[:], b.data[n2:n3])
		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
		copy(b.data[n1:], b.tmp[:n3-n2])
		return
	}
	for _, u := range x {
		encodeInt64(b, tag, u)
	}
}

// encodeInt64Opt encodes x only when it is non-zero.
func encodeInt64Opt(b *buffer, tag int, x int64) {
	if x == 0 {
		return
	}
	encodeInt64(b, tag, x)
}

// encodeString appends a length-delimited string field.
func encodeString(b *buffer, tag int, x string) {
	encodeLength(b, tag, len(x))
	b.data = append(b.data, x...)
}

// encodeStrings appends a repeated string field (never packed).
func encodeStrings(b *buffer, tag int, x []string) {
	for _, s := range x {
		encodeString(b, tag, s)
	}
}

// encodeBool encodes a bool as a 0/1 varint.
func encodeBool(b *buffer, tag int, x bool) {
	if x {
		encodeUint64(b, tag, 1)
	} else {
		encodeUint64(b, tag, 0)
	}
}

// encodeBoolOpt encodes x only when true.
func encodeBoolOpt(b *buffer, tag int, x bool) {
	if x {
		encodeBool(b, tag, x)
	}
}

// encodeMessage appends an embedded message: encode the body in place,
// append key+length, then rotate the prefix in front of the body.
func encodeMessage(b *buffer, tag int, m message) {
	n1 := len(b.data)
	m.encode(b)
	n2 := len(b.data)
	encodeLength(b, tag, n2-n1)
	n3 := len(b.data)
	copy(b.tmp[:], b.data[n2:n3])
	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
	copy(b.data[n1:], b.tmp[:n3-n2])
}

// unmarshal decodes data (a length-delimited message payload) into m.
func unmarshal(data []byte, m message) (err error) {
	b := buffer{data: data, typ: 2}
	return decodeMessage(&b, m)
}

// le64 reads a little-endian 64-bit value from p (fixed64 wire type).
func le64(p []byte) uint64 {
	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
}

// le32 reads a little-endian 32-bit value from p (fixed32 wire type).
func le32(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}

// decodeVarint decodes one varint from data, returning the value and
// the remaining bytes. At most 10 bytes are consumed (64 bits / 7).
func decodeVarint(data []byte) (uint64, []byte, error) {
	var u uint64
	for i := 0; ; i++ {
		if i >= 10 || i >= len(data) {
			return nil, nil, errors.New("bad varint")
		}
		u |= uint64(data[i]&0x7F) << uint(7*i)
		if data[i]&0x80 == 0 {
			return u, data[i+1:], nil
		}
	}
}

// decodeField scans a single key/value pair from data into b
// (b.field, b.typ, and b.u64 or b.data depending on the wire type)
// and returns the remaining bytes.
func decodeField(b *buffer, data []byte) ([]byte, error) {
	x, data, err := decodeVarint(data)
	if err != nil {
		return nil, err
	}
	b.field = int(x >> 3)
	b.typ = int(x & 7)
	b.data = nil
	b.u64 = 0
	switch b.typ {
	case 0:
		// varint
		b.u64, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
	case 1:
		// fixed64
		if len(data) < 8 {
			return nil, errors.New("not enough data")
		}
		b.u64 = le64(data[:8])
		data = data[8:]
	case 2:
		// length-delimited
		var n uint64
		n, data, err = decodeVarint(data)
		if err != nil {
			return nil, err
		}
		if n > uint64(len(data)) {
			return nil, errors.New("too much data")
		}
		b.data = data[:n]
		data = data[n:]
	case 5:
		// fixed32
		if len(data) < 4 {
			return nil, errors.New("not enough data")
		}
		b.u64 = uint64(le32(data[:4]))
		data = data[4:]
	default:
		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
	}

	return data, nil
}

// checkType verifies the scanned field has the expected wire type.
func checkType(b *buffer, typ int) error {
	if b.typ != typ {
		return errors.New("type mismatch")
	}
	return nil
}

// decodeMessage dispatches each field of the message in b.data to the
// per-field decoder from m.decoder(); unknown fields are skipped.
func decodeMessage(b *buffer, m message) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	dec := m.decoder()
	data := b.data
	for len(data) > 0 {
		// pull varint field# + type
		var err error
		data, err = decodeField(b, data)
		if err != nil {
			return err
		}
		if b.field >= len(dec) || dec[b.field] == nil {
			continue
		}
		if err := dec[b.field](b, m); err != nil {
			return err
		}
	}
	return nil
}

// decodeInt64 decodes a scanned varint field into x.
func decodeInt64(b *buffer, x *int64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = int64(b.u64)
	return nil
}

// decodeInt64s appends scanned int64 values (packed or single) to x.
func decodeInt64s(b *buffer, x *[]int64) error {
	if b.typ == 2 {
		// Packed encoding
		data := b.data
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			*x = append(*x, int64(u))
		}
		return nil
	}
	var i int64
	if err := decodeInt64(b, &i); err != nil {
		return err
	}
	*x = append(*x, i)
	return nil
}

// decodeUint64 decodes a scanned varint field into x.
func decodeUint64(b *buffer, x *uint64) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	*x = b.u64
	return nil
}

// decodeUint64s appends scanned uint64 values (packed or single) to x.
func decodeUint64s(b *buffer, x *[]uint64) error {
	if b.typ == 2 {
		data := b.data
		// Packed encoding
		for len(data) > 0 {
			var u uint64
			var err error

			if u, data, err = decodeVarint(data); err != nil {
				return err
			}
			*x = append(*x, u)
		}
		return nil
	}
	var u uint64
	if err := decodeUint64(b, &u); err != nil {
		return err
	}
	*x = append(*x, u)
	return nil
}

// decodeString copies a scanned length-delimited field into x.
func decodeString(b *buffer, x *string) error {
	if err := checkType(b, 2); err != nil {
		return err
	}
	*x = string(b.data)
	return nil
}

// decodeStrings appends a scanned string field to x.
func decodeStrings(b *buffer, x *[]string) error {
	var s string
	if err := decodeString(b, &s); err != nil {
		return err
	}
	*x = append(*x, s)
	return nil
}

// decodeBool decodes a scanned varint field as a bool (non-zero = true).
func decodeBool(b *buffer, x *bool) error {
	if err := checkType(b, 0); err != nil {
		return err
	}
	if int64(b.u64) == 0 {
		*x = false
	} else {
		*x = true
	}
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/encode.go
vendor/github.com/google/pprof/profile/encode.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package profile import ( "errors" "sort" "strings" ) func (p *Profile) decoder() []decoder { return profileDecoder } // preEncode populates the unexported fields to be used by encode // (with suffix X) from the corresponding exported fields. The // exported fields are cleared up to facilitate testing. func (p *Profile) preEncode() { strings := make(map[string]int) addString(strings, "") for _, st := range p.SampleType { st.typeX = addString(strings, st.Type) st.unitX = addString(strings, st.Unit) } for _, s := range p.Sample { s.labelX = nil var keys []string for k := range s.Label { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { vs := s.Label[k] for _, v := range vs { s.labelX = append(s.labelX, label{ keyX: addString(strings, k), strX: addString(strings, v), }, ) } } var numKeys []string for k := range s.NumLabel { numKeys = append(numKeys, k) } sort.Strings(numKeys) for _, k := range numKeys { keyX := addString(strings, k) vs := s.NumLabel[k] units := s.NumUnit[k] for i, v := range vs { var unitX int64 if len(units) != 0 { unitX = addString(strings, units[i]) } s.labelX = append(s.labelX, label{ keyX: keyX, numX: v, unitX: unitX, }, ) } } s.locationIDX = make([]uint64, len(s.Location)) for i, loc := range s.Location { s.locationIDX[i] = loc.ID } } for _, m := range p.Mapping { m.fileX = addString(strings, m.File) m.buildIDX = addString(strings, 
m.BuildID) } for _, l := range p.Location { for i, ln := range l.Line { if ln.Function != nil { l.Line[i].functionIDX = ln.Function.ID } else { l.Line[i].functionIDX = 0 } } if l.Mapping != nil { l.mappingIDX = l.Mapping.ID } else { l.mappingIDX = 0 } } for _, f := range p.Function { f.nameX = addString(strings, f.Name) f.systemNameX = addString(strings, f.SystemName) f.filenameX = addString(strings, f.Filename) } p.dropFramesX = addString(strings, p.DropFrames) p.keepFramesX = addString(strings, p.KeepFrames) if pt := p.PeriodType; pt != nil { pt.typeX = addString(strings, pt.Type) pt.unitX = addString(strings, pt.Unit) } p.commentX = nil for _, c := range p.Comments { p.commentX = append(p.commentX, addString(strings, c)) } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { p.stringTable[i] = s } } func (p *Profile) encode(b *buffer) { for _, x := range p.SampleType { encodeMessage(b, 1, x) } for _, x := range p.Sample { encodeMessage(b, 2, x) } for _, x := range p.Mapping { encodeMessage(b, 3, x) } for _, x := range p.Location { encodeMessage(b, 4, x) } for _, x := range p.Function { encodeMessage(b, 5, x) } encodeStrings(b, 6, p.stringTable) encodeInt64Opt(b, 7, p.dropFramesX) encodeInt64Opt(b, 8, p.keepFramesX) encodeInt64Opt(b, 9, p.TimeNanos) encodeInt64Opt(b, 10, p.DurationNanos) if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { encodeMessage(b, 11, p.PeriodType) } encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ nil, // 0 // repeated ValueType sample_type = 1 func(b *buffer, m message) error { x := new(ValueType) pp := m.(*Profile) pp.SampleType = append(pp.SampleType, x) return decodeMessage(b, x) }, // repeated Sample sample = 2 func(b *buffer, m message) error { x := new(Sample) pp := 
m.(*Profile) pp.Sample = append(pp.Sample, x) return decodeMessage(b, x) }, // repeated Mapping mapping = 3 func(b *buffer, m message) error { x := new(Mapping) pp := m.(*Profile) pp.Mapping = append(pp.Mapping, x) return decodeMessage(b, x) }, // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = append(pp.Location, x) err := decodeMessage(b, x) b.tmpLines = x.Line[:0] // Copy to shrink size and detach from shared space. x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 func(b *buffer, m message) error { x := new(Function) pp := m.(*Profile) pp.Function = append(pp.Function, x) return decodeMessage(b, x) }, // repeated string string_table = 6 func(b *buffer, m message) error { err := decodeStrings(b, &m.(*Profile).stringTable) if err != nil { return err } if m.(*Profile).stringTable[0] != "" { return errors.New("string_table[0] must be ''") } return nil }, // int64 drop_frames = 7 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, // int64 keep_frames = 8 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, // int64 time_nanos = 9 func(b *buffer, m message) error { if m.(*Profile).TimeNanos != 0 { return errConcatProfile } return decodeInt64(b, &m.(*Profile).TimeNanos) }, // int64 duration_nanos = 10 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, // ValueType period_type = 11 func(b *buffer, m message) error { x := new(ValueType) pp := m.(*Profile) pp.PeriodType = x return decodeMessage(b, x) }, // int64 period = 12 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, // repeated int64 comment = 13 func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return 
decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, // string doc_link = 15; func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with // suffix X) and populates the corresponding exported fields. // The unexported fields are cleared up to facilitate testing. func (p *Profile) postDecode() error { var err error mappings := make(map[uint64]*Mapping, len(p.Mapping)) mappingIds := make([]*Mapping, len(p.Mapping)+1) for _, m := range p.Mapping { m.File, err = getString(p.stringTable, &m.fileX, err) m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) if m.ID < uint64(len(mappingIds)) { mappingIds[m.ID] = m } else { mappings[m.ID] = m } // If this a main linux kernel mapping with a relocation symbol suffix // ("[kernel.kallsyms]_text"), extract said suffix. // It is fairly hacky to handle at this level, but the alternatives appear even worse. const prefix = "[kernel.kallsyms]" if strings.HasPrefix(m.File, prefix) { m.KernelRelocationSymbol = m.File[len(prefix):] } } functions := make(map[uint64]*Function, len(p.Function)) functionIds := make([]*Function, len(p.Function)+1) for _, f := range p.Function { f.Name, err = getString(p.stringTable, &f.nameX, err) f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) f.Filename, err = getString(p.stringTable, &f.filenameX, err) if f.ID < uint64(len(functionIds)) { functionIds[f.ID] = f } else { functions[f.ID] = f } } locations := make(map[uint64]*Location, len(p.Location)) locationIds := make([]*Location, len(p.Location)+1) for _, l := range p.Location { if id := l.mappingIDX; id < uint64(len(mappingIds)) { l.Mapping = mappingIds[id] } else { l.Mapping = mappings[id] } l.mappingIDX = 0 for i, ln := range l.Line { if id := ln.functionIDX; id != 0 { l.Line[i].functionIDX = 0 if id < uint64(len(functionIds)) { l.Line[i].Function = functionIds[id] } else { l.Line[i].Function = functions[id] } } } if l.ID < 
uint64(len(locationIds)) { locationIds[l.ID] = l } else { locations[l.ID] = l } } for _, st := range p.SampleType { st.Type, err = getString(p.stringTable, &st.typeX, err) st.Unit, err = getString(p.stringTable, &st.unitX, err) } // Pre-allocate space for all locations. numLocations := 0 for _, s := range p.Sample { numLocations += len(s.locationIDX) } locBuffer := make([]*Location, numLocations) for _, s := range p.Sample { if len(s.labelX) > 0 { labels := make(map[string][]string, len(s.labelX)) numLabels := make(map[string][]int64, len(s.labelX)) numUnits := make(map[string][]string, len(s.labelX)) for _, l := range s.labelX { var key, value string key, err = getString(p.stringTable, &l.keyX, err) if l.strX != 0 { value, err = getString(p.stringTable, &l.strX, err) labels[key] = append(labels[key], value) } else if l.numX != 0 || l.unitX != 0 { numValues := numLabels[key] units := numUnits[key] if l.unitX != 0 { var unit string unit, err = getString(p.stringTable, &l.unitX, err) units = padStringArray(units, len(numValues)) numUnits[key] = append(units, unit) } numLabels[key] = append(numLabels[key], l.numX) } } if len(labels) > 0 { s.Label = labels } if len(numLabels) > 0 { s.NumLabel = numLabels for key, units := range numUnits { if len(units) > 0 { numUnits[key] = padStringArray(units, len(numLabels[key])) } } s.NumUnit = numUnits } } s.Location = locBuffer[:len(s.locationIDX)] locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] } else { s.Location[i] = locations[lid] } } s.locationIDX = nil } p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) if pt := p.PeriodType; pt == nil { p.PeriodType = &ValueType{} } if pt := p.PeriodType; pt != nil { pt.Type, err = getString(p.stringTable, &pt.typeX, err) pt.Unit, err = getString(p.stringTable, &pt.unitX, err) } for _, i := range p.commentX { 
var c string c, err = getString(p.stringTable, &i, err) p.Comments = append(p.Comments, c) } p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } // padStringArray pads arr with enough empty strings to make arr // length l when arr's length is less than l. func padStringArray(arr []string, l int) []string { if l <= len(arr) { return arr } return append(arr, make([]string, l-len(arr))...) } func (p *ValueType) decoder() []decoder { return valueTypeDecoder } func (p *ValueType) encode(b *buffer) { encodeInt64Opt(b, 1, p.typeX) encodeInt64Opt(b, 2, p.unitX) } var valueTypeDecoder = []decoder{ nil, // 0 // optional int64 type = 1 func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, // optional int64 unit = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, } func (p *Sample) decoder() []decoder { return sampleDecoder } func (p *Sample) encode(b *buffer) { encodeUint64s(b, 1, p.locationIDX) encodeInt64s(b, 2, p.Value) for _, x := range p.labelX { encodeMessage(b, 3, x) } } var sampleDecoder = []decoder{ nil, // 0 // repeated uint64 location = 1 func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, // repeated int64 value = 2 func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, // repeated Label label = 3 func(b *buffer, m message) error { s := m.(*Sample) n := len(s.labelX) s.labelX = append(s.labelX, label{}) return decodeMessage(b, &s.labelX[n]) }, } func (p label) decoder() []decoder { return labelDecoder } func (p label) encode(b *buffer) { encodeInt64Opt(b, 1, p.keyX) encodeInt64Opt(b, 2, p.strX) encodeInt64Opt(b, 3, p.numX) encodeInt64Opt(b, 4, p.unitX) } var labelDecoder = []decoder{ nil, // 0 // optional int64 key = 1 func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, // optional 
int64 str = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, // optional int64 num = 3 func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, // optional int64 num = 4 func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, } func (p *Mapping) decoder() []decoder { return mappingDecoder } func (p *Mapping) encode(b *buffer) { encodeUint64Opt(b, 1, p.ID) encodeUint64Opt(b, 2, p.Start) encodeUint64Opt(b, 3, p.Limit) encodeUint64Opt(b, 4, p.Offset) encodeInt64Opt(b, 5, p.fileX) encodeInt64Opt(b, 6, p.buildIDX) encodeBoolOpt(b, 7, p.HasFunctions) encodeBoolOpt(b, 8, p.HasFilenames) encodeBoolOpt(b, 9, p.HasLineNumbers) encodeBoolOpt(b, 10, p.HasInlineFrames) } var mappingDecoder = []decoder{ nil, // 0 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 } func (p *Location) 
decoder() []decoder { return locationDecoder } func (p *Location) encode(b *buffer) { encodeUint64Opt(b, 1, p.ID) encodeUint64Opt(b, 2, p.mappingIDX) encodeUint64Opt(b, 3, p.Address) for i := range p.Line { encodeMessage(b, 4, &p.Line[i]) } encodeBoolOpt(b, 5, p.IsFolded) } var locationDecoder = []decoder{ nil, // 0 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; func(b *buffer, m message) error { // repeated Line line = 4 pp := m.(*Location) n := len(pp.Line) pp.Line = append(pp.Line, Line{}) return decodeMessage(b, &pp.Line[n]) }, func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; } func (p *Line) decoder() []decoder { return lineDecoder } func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ nil, // 0 // optional uint64 function_id = 1 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, // optional int64 column = 3 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { return functionDecoder } func (p *Function) encode(b *buffer) { encodeUint64Opt(b, 1, p.ID) encodeInt64Opt(b, 2, p.nameX) encodeInt64Opt(b, 3, p.systemNameX) encodeInt64Opt(b, 4, p.filenameX) encodeInt64Opt(b, 5, p.StartLine) } var functionDecoder = []decoder{ nil, // 0 // optional uint64 id = 1 func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, // optional int64 function_name = 2 func(b *buffer, m 
message) error { return decodeInt64(b, &m.(*Function).nameX) }, // optional int64 function_system_name = 3 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, // repeated int64 filename = 4 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, // optional int64 start_line = 5 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, } func addString(strings map[string]int, s string) int64 { i, ok := strings[s] if !ok { i = len(strings) strings[s] = i } return int64(i) } func getString(strings []string, strng *int64, err error) (string, error) { if err != nil { return "", err } s := int(*strng) if s < 0 || s >= len(strings) { return "", errMalformed } *strng = 0 return strings[s], nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/legacy_profile.go
vendor/github.com/google/pprof/profile/legacy_profile.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file implements parsers to convert legacy profiles into the // profile.proto format. package profile import ( "bufio" "bytes" "fmt" "io" "math" "regexp" "strconv" "strings" ) var ( countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. // Recommended format: // Start End object file name offset(optional) linker build id // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 spaceDigits = `\s+[[:digit:]]+` hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` oSpace = `\s*` // Capturing expressions. 
cHex = `(?:0x)?([[:xdigit:]]+)` cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` cSpaceString = `(?:\s+(\S+))?` cSpaceHex = `(?:\s+([[:xdigit:]]+))?` cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` cPerm = `(?:\s+([-rwxp]+))?` procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) // Regular expression to parse log data, of the form: // ... file:line] msg... logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) ) func isSpaceOrComment(line string) bool { trimmed := strings.TrimSpace(line) return len(trimmed) == 0 || trimmed[0] == '#' } // parseGoCount parses a Go count profile (e.g., threadcreate or // goroutine) and returns a new Profile. func parseGoCount(b []byte) (*Profile, error) { s := bufio.NewScanner(bytes.NewBuffer(b)) // Skip comments at the beginning of the file. for s.Scan() && isSpaceOrComment(s.Text()) { } if err := s.Err(); err != nil { return nil, err } m := countStartRE.FindStringSubmatch(s.Text()) if m == nil { return nil, errUnrecognized } profileType := m[1] p := &Profile{ PeriodType: &ValueType{Type: profileType, Unit: "count"}, Period: 1, SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, } locations := make(map[uint64]*Location) for s.Scan() { line := s.Text() if isSpaceOrComment(line) { continue } if strings.HasPrefix(line, "---") { break } m := countRE.FindStringSubmatch(line) if m == nil { return nil, errMalformed } n, err := strconv.ParseInt(m[1], 0, 64) if err != nil { return nil, errMalformed } fields := strings.Fields(m[2]) locs := make([]*Location, 0, len(fields)) for _, stk := range fields { addr, err := strconv.ParseUint(stk, 0, 64) if err != nil { return nil, errMalformed } // Adjust all frames by -1 to land on top of the call instruction. 
addr-- loc := locations[addr] if loc == nil { loc = &Location{ Address: addr, } locations[addr] = loc p.Location = append(p.Location, loc) } locs = append(locs, loc) } p.Sample = append(p.Sample, &Sample{ Location: locs, Value: []int64{n}, }) } if err := s.Err(); err != nil { return nil, err } if err := parseAdditionalSections(s, p); err != nil { return nil, err } return p, nil } // remapLocationIDs ensures there is a location for each address // referenced by a sample, and remaps the samples to point to the new // location ids. func (p *Profile) remapLocationIDs() { seen := make(map[*Location]bool, len(p.Location)) var locs []*Location for _, s := range p.Sample { for _, l := range s.Location { if seen[l] { continue } l.ID = uint64(len(locs) + 1) locs = append(locs, l) seen[l] = true } } p.Location = locs } func (p *Profile) remapFunctionIDs() { seen := make(map[*Function]bool, len(p.Function)) var fns []*Function for _, l := range p.Location { for _, ln := range l.Line { fn := ln.Function if fn == nil || seen[fn] { continue } fn.ID = uint64(len(fns) + 1) fns = append(fns, fn) seen[fn] = true } } p.Function = fns } // remapMappingIDs matches location addresses with existing mappings // and updates them appropriately. This is O(N*M), if this ever shows // up as a bottleneck, evaluate sorting the mappings and doing a // binary search, which would make it O(N*log(M)). func (p *Profile) remapMappingIDs() { // Some profile handlers will incorrectly set regions for the main // executable if its section is remapped. Fix them through heuristics. if len(p.Mapping) > 0 { // Remove the initial mapping if named '/anon_hugepage' and has a // consecutive adjacent mapping. if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { p.Mapping = p.Mapping[1:] } } } // Subtract the offset from the start of the main mapping if it // ends up at a recognizable start address. 
if len(p.Mapping) > 0 { const expectedStart = 0x400000 if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { m.Start = expectedStart m.Offset = 0 } } // Associate each location with an address to the corresponding // mapping. Create fake mapping if a suitable one isn't found. var fake *Mapping nextLocation: for _, l := range p.Location { a := l.Address if l.Mapping != nil || a == 0 { continue } for _, m := range p.Mapping { if m.Start <= a && a < m.Limit { l.Mapping = m continue nextLocation } } // Work around legacy handlers failing to encode the first // part of mappings split into adjacent ranges. for _, m := range p.Mapping { if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { m.Start -= m.Offset m.Offset = 0 l.Mapping = m continue nextLocation } } // If there is still no mapping, create a fake one. // This is important for the Go legacy handler, which produced // no mappings. if fake == nil { fake = &Mapping{ ID: 1, Limit: ^uint64(0), } p.Mapping = append(p.Mapping, fake) } l.Mapping = fake } // Reset all mapping IDs. 
for i, m := range p.Mapping { m.ID = uint64(i + 1) } } var cpuInts = []func([]byte) (uint64, []byte){ get32l, get32b, get64l, get64b, } func get32l(b []byte) (uint64, []byte) { if len(b) < 4 { return 0, nil } return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] } func get32b(b []byte) (uint64, []byte) { if len(b) < 4 { return 0, nil } return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] } func get64l(b []byte) (uint64, []byte) { if len(b) < 8 { return 0, nil } return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] } func get64b(b []byte) (uint64, []byte) { if len(b) < 8 { return 0, nil } return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] } // parseCPU parses a profilez legacy profile and returns a newly // populated Profile. // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: // // 1st word -- 0 // 2nd word -- 3 // 3rd word -- 0 if a c++ application, 1 if a java application. // 4th word -- Sampling period (in microseconds). // 5th word -- Padding. func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 for _, parse = range cpuInts { var tmp []byte n1, tmp = parse(b) n2, tmp = parse(tmp) n3, tmp = parse(tmp) n4, tmp = parse(tmp) n5, tmp = parse(tmp) if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { b = tmp return cpuProfile(b, int64(n4), parse) } if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { b = tmp return javaCPUProfile(b, int64(n4), parse) } } return nil, errUnrecognized } // cpuProfile returns a new Profile from C++ profilez data. 
// b is the profile bytes after the header, period is the profiling // period, and parse is a function to parse 8-byte chunks from the // profile in its native endianness. func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { p := &Profile{ Period: period * 1000, PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, SampleType: []*ValueType{ {Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}, }, } var err error if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { return nil, err } // If *most* samples have the same second-to-the-bottom frame, it // strongly suggests that it is an uninteresting artifact of // measurement -- a stack frame pushed by the signal handler. The // bottom frame is always correct as it is picked up from the signal // structure, not the stack. Check if this is the case and if so, // remove. // Remove up to two frames. maxiter := 2 // Allow one different sample for this many samples with the same // second-to-last frame. similarSamples := 32 margin := len(p.Sample) / similarSamples for iter := 0; iter < maxiter; iter++ { addr1 := make(map[uint64]int) for _, s := range p.Sample { if len(s.Location) > 1 { a := s.Location[1].Address addr1[a] = addr1[a] + 1 } } for id1, count := range addr1 { if count >= len(p.Sample)-margin { // Found uninteresting frame, strip it out from all samples for _, s := range p.Sample { if len(s.Location) > 1 && s.Location[1].Address == id1 { s.Location = append(s.Location[:1], s.Location[2:]...) } } break } } } if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { return nil, err } cleanupDuplicateLocations(p) return p, nil } func cleanupDuplicateLocations(p *Profile) { // The profile handler may duplicate the leaf frame, because it gets // its address both from stack unwinding and from the signal // context. Detect this and delete the duplicate, which has been // adjusted by -1. 
The leaf address should not be adjusted as it is // not a call. for _, s := range p.Sample { if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { s.Location = append(s.Location[:1], s.Location[2:]...) } } } // parseCPUSamples parses a collection of profilez samples from a // profile. // // profilez samples are a repeated sequence of stack frames of the // form: // // 1st word -- The number of times this stack was encountered. // 2nd word -- The size of the stack (StackSize). // 3rd word -- The first address on the stack. // ... // StackSize + 2 -- The last address on the stack // // The last stack trace is of the form: // // 1st word -- 0 // 2nd word -- 1 // 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual // call (except for the leaf, which is not a call). func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { locs := make(map[uint64]*Location) for len(b) > 0 { var count, nstk uint64 count, b = parse(b) nstk, b = parse(b) if b == nil || nstk > uint64(len(b)/4) { return nil, nil, errUnrecognized } var sloc []*Location addrs := make([]uint64, nstk) for i := 0; i < int(nstk); i++ { addrs[i], b = parse(b) } if count == 0 && nstk == 1 && addrs[0] == 0 { // End of data marker break } for i, addr := range addrs { if adjust && i > 0 { addr-- } loc := locs[addr] if loc == nil { loc = &Location{ Address: addr, } locs[addr] = loc p.Location = append(p.Location, loc) } sloc = append(sloc, loc) } p.Sample = append(p.Sample, &Sample{ Value: []int64{int64(count), int64(count) * p.Period}, Location: sloc, }) } // Reached the end without finding the EOD marker. return b, locs, nil } // parseHeap parses a heapz legacy or a growthz profile and // returns a newly populated Profile. 
func parseHeap(b []byte) (p *Profile, err error) { s := bufio.NewScanner(bytes.NewBuffer(b)) if !s.Scan() { if err := s.Err(); err != nil { return nil, err } return nil, errUnrecognized } p = &Profile{} sampling := "" hasAlloc := false line := s.Text() p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} if header := heapHeaderRE.FindStringSubmatch(line); header != nil { sampling, p.Period, hasAlloc, err = parseHeapHeader(line) if err != nil { return nil, err } } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { p.Period = 1 } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { p.Period = 1 } else { return nil, errUnrecognized } if hasAlloc { // Put alloc before inuse so that default pprof selection // will prefer inuse_space. p.SampleType = []*ValueType{ {Type: "alloc_objects", Unit: "count"}, {Type: "alloc_space", Unit: "bytes"}, {Type: "inuse_objects", Unit: "count"}, {Type: "inuse_space", Unit: "bytes"}, } } else { p.SampleType = []*ValueType{ {Type: "objects", Unit: "count"}, {Type: "space", Unit: "bytes"}, } } locs := make(map[uint64]*Location) for s.Scan() { line := strings.TrimSpace(s.Text()) if isSpaceOrComment(line) { continue } if isMemoryMapSentinel(line) { break } value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) if err != nil { return nil, err } var sloc []*Location for _, addr := range addrs { // Addresses from stack traces point to the next instruction after // each call. Adjust by -1 to land somewhere on the actual call. 
addr-- loc := locs[addr] if locs[addr] == nil { loc = &Location{ Address: addr, } p.Location = append(p.Location, loc) locs[addr] = loc } sloc = append(sloc, loc) } p.Sample = append(p.Sample, &Sample{ Value: value, Location: sloc, NumLabel: map[string][]int64{"bytes": {blocksize}}, }) } if err := s.Err(); err != nil { return nil, err } if err := parseAdditionalSections(s, p); err != nil { return nil, err } return p, nil } func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { header := heapHeaderRE.FindStringSubmatch(line) if header == nil { return "", 0, false, errUnrecognized } if len(header[6]) > 0 { if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { return "", 0, false, errUnrecognized } } if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { hasAlloc = true } switch header[5] { case "heapz_v2", "heap_v2": return "v2", period, hasAlloc, nil case "heapprofile": return "", 1, hasAlloc, nil case "heap": return "v2", period / 2, hasAlloc, nil default: return "", 0, false, errUnrecognized } } // parseHeapSample parses a single row from a heap profile into a new Sample. func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { sampleData := heapSampleRE.FindStringSubmatch(line) if len(sampleData) != 6 { return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) } // This is a local-scoped helper function to avoid needing to pass // around rate, sampling and many return parameters. 
addValues := func(countString, sizeString string, label string) error { count, err := strconv.ParseInt(countString, 10, 64) if err != nil { return fmt.Errorf("malformed sample: %s: %v", line, err) } size, err := strconv.ParseInt(sizeString, 10, 64) if err != nil { return fmt.Errorf("malformed sample: %s: %v", line, err) } if count == 0 && size != 0 { return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) } if count != 0 { blocksize = size / count if sampling == "v2" { count, size = scaleHeapSample(count, size, rate) } } value = append(value, count, size) return nil } if includeAlloc { if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { return nil, 0, nil, err } } if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { return nil, 0, nil, err } addrs, err = parseHexAddresses(sampleData[5]) if err != nil { return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) } return value, blocksize, addrs, nil } // parseHexAddresses extracts hex numbers from a string, attempts to convert // each to an unsigned 64-bit number and returns the resulting numbers as a // slice, or an error if the string contains hex numbers which are too large to // handle (which means a malformed profile). func parseHexAddresses(s string) ([]uint64, error) { hexStrings := hexNumberRE.FindAllString(s, -1) var addrs []uint64 for _, s := range hexStrings { if addr, err := strconv.ParseUint(s, 0, 64); err == nil { addrs = append(addrs, addr) } else { return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) } } return addrs, nil } // scaleHeapSample adjusts the data from a heapz Sample to // account for its probability of appearing in the collected // data. heapz profiles are a sampling of the memory allocations // requests in a program. We estimate the unsampled value by dividing // each collected sample by its probability of appearing in the // profile. 
heapz v2 profiles rely on a poisson process to determine // which samples to collect, based on the desired average collection // rate R. The probability of a sample of size S to appear in that // profile is 1-exp(-S/R). func scaleHeapSample(count, size, rate int64) (int64, int64) { if count == 0 || size == 0 { return 0, 0 } if rate <= 1 { // if rate==1 all samples were collected so no adjustment is needed. // if rate<1 treat as unknown and skip scaling. return count, size } avgSize := float64(size) / float64(count) scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) return int64(float64(count) * scale), int64(float64(size) * scale) } // parseContention parses a mutex or contention profile. There are 2 cases: // "--- contentionz " for legacy C++ profiles (and backwards compatibility) // "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. func parseContention(b []byte) (*Profile, error) { s := bufio.NewScanner(bytes.NewBuffer(b)) if !s.Scan() { if err := s.Err(); err != nil { return nil, err } return nil, errUnrecognized } switch l := s.Text(); { case strings.HasPrefix(l, "--- contentionz "): case strings.HasPrefix(l, "--- mutex:"): case strings.HasPrefix(l, "--- contention:"): default: return nil, errUnrecognized } p := &Profile{ PeriodType: &ValueType{Type: "contentions", Unit: "count"}, Period: 1, SampleType: []*ValueType{ {Type: "contentions", Unit: "count"}, {Type: "delay", Unit: "nanoseconds"}, }, } var cpuHz int64 // Parse text of the form "attribute = value" before the samples. 
const delimiter = "=" for s.Scan() { line := s.Text() if line = strings.TrimSpace(line); isSpaceOrComment(line) { continue } if strings.HasPrefix(line, "---") { break } attr := strings.SplitN(line, delimiter, 2) if len(attr) != 2 { break } key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) var err error switch key { case "cycles/second": if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { return nil, errUnrecognized } case "sampling period": if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { return nil, errUnrecognized } case "ms since reset": ms, err := strconv.ParseInt(val, 0, 64) if err != nil { return nil, errUnrecognized } p.DurationNanos = ms * 1000 * 1000 case "format": // CPP contentionz profiles don't have format. return nil, errUnrecognized case "resolution": // CPP contentionz profiles don't have resolution. return nil, errUnrecognized case "discarded samples": default: return nil, errUnrecognized } } if err := s.Err(); err != nil { return nil, err } locs := make(map[uint64]*Location) for { line := strings.TrimSpace(s.Text()) if strings.HasPrefix(line, "---") { break } if !isSpaceOrComment(line) { value, addrs, err := parseContentionSample(line, p.Period, cpuHz) if err != nil { return nil, err } var sloc []*Location for _, addr := range addrs { // Addresses from stack traces point to the next instruction after // each call. Adjust by -1 to land somewhere on the actual call. addr-- loc := locs[addr] if locs[addr] == nil { loc = &Location{ Address: addr, } p.Location = append(p.Location, loc) locs[addr] = loc } sloc = append(sloc, loc) } p.Sample = append(p.Sample, &Sample{ Value: value, Location: sloc, }) } if !s.Scan() { break } } if err := s.Err(); err != nil { return nil, err } if err := parseAdditionalSections(s, p); err != nil { return nil, err } return p, nil } // parseContentionSample parses a single row from a contention profile // into a new Sample. 
func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { sampleData := contentionSampleRE.FindStringSubmatch(line) if sampleData == nil { return nil, nil, errUnrecognized } v1, err := strconv.ParseInt(sampleData[1], 10, 64) if err != nil { return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) } v2, err := strconv.ParseInt(sampleData[2], 10, 64) if err != nil { return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) } // Unsample values if period and cpuHz are available. // - Delays are scaled to cycles and then to nanoseconds. // - Contentions are scaled to cycles. if period > 0 { if cpuHz > 0 { cpuGHz := float64(cpuHz) / 1e9 v1 = int64(float64(v1) * float64(period) / cpuGHz) } v2 = v2 * period } value = []int64{v2, v1} addrs, err = parseHexAddresses(sampleData[3]) if err != nil { return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) } return value, addrs, nil } // parseThread parses a Threadz profile and returns a new Profile. func parseThread(b []byte) (*Profile, error) { s := bufio.NewScanner(bytes.NewBuffer(b)) // Skip past comments and empty lines seeking a real header. for s.Scan() && isSpaceOrComment(s.Text()) { } line := s.Text() if m := threadzStartRE.FindStringSubmatch(line); m != nil { // Advance over initial comments until first stack trace. for s.Scan() { if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { break } } } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { return nil, errUnrecognized } p := &Profile{ SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, PeriodType: &ValueType{Type: "thread", Unit: "count"}, Period: 1, } locs := make(map[uint64]*Location) // Recognize each thread and populate profile samples. 
for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { return nil, errUnrecognized } var addrs []uint64 var err error line, addrs, err = parseThreadSample(s) if err != nil { return nil, err } if len(addrs) == 0 { // We got a --same as previous threads--. Bump counters. if len(p.Sample) > 0 { s := p.Sample[len(p.Sample)-1] s.Value[0]++ } continue } var sloc []*Location for i, addr := range addrs { // Addresses from stack traces point to the next instruction after // each call. Adjust by -1 to land somewhere on the actual call // (except for the leaf, which is not a call). if i > 0 { addr-- } loc := locs[addr] if locs[addr] == nil { loc = &Location{ Address: addr, } p.Location = append(p.Location, loc) locs[addr] = loc } sloc = append(sloc, loc) } p.Sample = append(p.Sample, &Sample{ Value: []int64{1}, Location: sloc, }) } if err := parseAdditionalSections(s, p); err != nil { return nil, err } cleanupDuplicateLocations(p) return p, nil } // parseThreadSample parses a symbolized or unsymbolized stack trace. // Returns the first line after the traceback, the sample (or nil if // it hits a 'same-as-previous' marker) and an error. func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { var line string sameAsPrevious := false for s.Scan() { line = strings.TrimSpace(s.Text()) if line == "" { continue } if strings.HasPrefix(line, "---") { break } if strings.Contains(line, "same as previous thread") { sameAsPrevious = true continue } curAddrs, err := parseHexAddresses(line) if err != nil { return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) } addrs = append(addrs, curAddrs...) } if err := s.Err(); err != nil { return "", nil, err } if sameAsPrevious { return line, nil, nil } return line, addrs, nil } // parseAdditionalSections parses any additional sections in the // profile, ignoring any unrecognized sections. 
func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { for !isMemoryMapSentinel(s.Text()) && s.Scan() { } if err := s.Err(); err != nil { return err } return p.ParseMemoryMapFromScanner(s) } // ParseProcMaps parses a memory map in the format of /proc/self/maps. // ParseMemoryMap should be called after setting on a profile to // associate locations to the corresponding mapping based on their // address. func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { s := bufio.NewScanner(rd) return parseProcMapsFromScanner(s) } func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { var mapping []*Mapping var attrs []string const delimiter = "=" r := strings.NewReplacer() for s.Scan() { line := r.Replace(removeLoggingInfo(s.Text())) m, err := parseMappingEntry(line) if err != nil { if err == errUnrecognized { // Recognize assignments of the form: attr=value, and replace // $attr with value on subsequent mappings. if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) r = strings.NewReplacer(attrs...) } // Ignore any unrecognized entries continue } return nil, err } if m == nil { continue } mapping = append(mapping, m) } if err := s.Err(); err != nil { return nil, err } return mapping, nil } // removeLoggingInfo detects and removes log prefix entries generated // by the glog package. If no logging prefix is detected, the string // is returned unmodified. func removeLoggingInfo(line string) string { if match := logInfoRE.FindStringIndex(line); match != nil { return line[match[1]:] } return line } // ParseMemoryMap parses a memory map in the format of // /proc/self/maps, and overrides the mappings in the current profile. // It renumbers the samples and locations in the profile correspondingly. 
func (p *Profile) ParseMemoryMap(rd io.Reader) error { return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) } // ParseMemoryMapFromScanner parses a memory map in the format of // /proc/self/maps or a variety of legacy format, and overrides the // mappings in the current profile. It renumbers the samples and // locations in the profile correspondingly. func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { mapping, err := parseProcMapsFromScanner(s) if err != nil { return err } p.Mapping = append(p.Mapping, mapping...) p.massageMappings() p.remapLocationIDs() p.remapFunctionIDs() p.remapMappingIDs() return nil } func parseMappingEntry(l string) (*Mapping, error) { var start, end, perm, file, offset, buildID string if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] } else { return nil, errUnrecognized } var err error mapping := &Mapping{ File: file, BuildID: buildID, } if perm != "" && !strings.Contains(perm, "x") { // Skip non-executable entries. return nil, nil } if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { return nil, errUnrecognized } if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { return nil, errUnrecognized } if offset != "" { if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { return nil, errUnrecognized } } return mapping, nil } var memoryMapSentinels = []string{ "--- Memory map: ---", "MAPPED_LIBRARIES:", } // isMemoryMapSentinel returns true if the string contains one of the // known sentinels for memory map information. 
func isMemoryMapSentinel(line string) bool { for _, s := range memoryMapSentinels { if strings.Contains(line, s) { return true } } return false } func (p *Profile) addLegacyFrameInfo() { switch { case isProfileType(p, heapzSampleTypes): p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr case isProfileType(p, contentionzSampleTypes): p.DropFrames, p.KeepFrames = lockRxStr, "" default: p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" } } var heapzSampleTypes = [][]string{ {"allocations", "size"}, // early Go pprof profiles {"objects", "space"}, {"inuse_objects", "inuse_space"}, {"alloc_objects", "alloc_space"}, {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles } var contentionzSampleTypes = [][]string{ {"contentions", "delay"}, } func isProfileType(p *Profile, types [][]string) bool { st := p.SampleType nextType: for _, t := range types { if len(st) != len(t) { continue } for i := range st { if st[i].Type != t[i] { continue nextType } } return true } return false } var allocRxStr = strings.Join([]string{ // POSIX entry points. `calloc`, `cfree`, `malloc`, `free`, `memalign`, `do_memalign`, `(__)?posix_memalign`, `pvalloc`, `valloc`, `realloc`, // TC malloc. `tcmalloc::.*`, `tc_calloc`, `tc_cfree`, `tc_malloc`, `tc_free`, `tc_memalign`, `tc_posix_memalign`, `tc_pvalloc`, `tc_valloc`, `tc_realloc`, `tc_new`, `tc_delete`, `tc_newarray`, `tc_deletearray`, `tc_new_nothrow`, `tc_newarray_nothrow`, // Memory-allocation routines on OS X. `malloc_zone_malloc`, `malloc_zone_calloc`, `malloc_zone_valloc`, `malloc_zone_realloc`, `malloc_zone_memalign`, `malloc_zone_free`, // Go runtime `runtime\..*`, // Other misc. 
memory allocation routines `BaseArena::.*`, `(::)?do_malloc_no_errno`, `(::)?do_malloc_pages`, `(::)?do_malloc`, `DoSampledAllocation`, `MallocedMemBlock::MallocedMemBlock`, `_M_allocate`, `__builtin_(vec_)?delete`, `__builtin_(vec_)?new`, `__gnu_cxx::new_allocator::allocate`, `__libc_malloc`, `__malloc_alloc_template::allocate`, `allocate`, `cpp_alloc`, `operator new(\[\])?`, `simple_alloc::allocate`, }, `|`) var allocSkipRxStr = strings.Join([]string{ // Preserve Go runtime frames that appear in the middle/bottom of
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/prune.go
vendor/github.com/google/pprof/profile/prune.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Implements methods to remove frames from profiles. package profile import ( "fmt" "regexp" "strings" ) var ( reservedNames = []string{"(anonymous namespace)", "operator()"} bracketRx = func() *regexp.Regexp { var quotedNames []string for _, name := range append(reservedNames, "(") { quotedNames = append(quotedNames, regexp.QuoteMeta(name)) } return regexp.MustCompile(strings.Join(quotedNames, "|")) }() ) // simplifyFunc does some primitive simplification of function names. func simplifyFunc(f string) string { // Account for leading '.' on the PPC ELF v1 ABI. funcName := strings.TrimPrefix(f, ".") // Account for unsimplified names -- try to remove the argument list by trimming // starting from the first '(', but skipping reserved names that have '('. for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { foundReserved := false for _, res := range reservedNames { if funcName[ind[0]:ind[1]] == res { foundReserved = true break } } if !foundReserved { funcName = funcName[:ind[0]] break } } return funcName } // Prune removes all nodes beneath a node matching dropRx, and not // matching keepRx. If the root node of a Sample matches, the sample // will have an empty stack. func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) // simplifyFunc can be expensive, so cache results. 
// Note that the same function name can be encountered many times due // different lines and addresses in the same function. pruneCache := map[string]bool{} // Map from function to whether or not to prune pruneFromHere := func(s string) bool { if r, ok := pruneCache[s]; ok { return r } funcName := simplifyFunc(s) if dropRx.MatchString(funcName) { if keepRx == nil || !keepRx.MatchString(funcName) { pruneCache[s] = true return true } } pruneCache[s] = false return false } for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { if pruneFromHere(fn.Name) { break } } } if i >= 0 { // Found matching entry to prune. pruneBeneath[loc.ID] = true // Remove the matching location. if i == len(loc.Line)-1 { // Matched the top entry: prune the whole location. prune[loc.ID] = true } else { loc.Line = loc.Line[i+1:] } } } // Prune locs from each Sample for _, sample := range p.Sample { // Scan from the root to the leaves to find the prune location. // Do not prune frames before the first user frame, to avoid // pruning everything. foundUser := false for i := len(sample.Location) - 1; i >= 0; i-- { id := sample.Location[i].ID if !prune[id] && !pruneBeneath[id] { foundUser = true continue } if !foundUser { continue } if prune[id] { sample.Location = sample.Location[i+1:] break } if pruneBeneath[id] { sample.Location = sample.Location[i:] break } } } } // RemoveUninteresting prunes and elides profiles using built-in // tables of uninteresting function names. 
func (p *Profile) RemoveUninteresting() error { var keep, drop *regexp.Regexp var err error if p.DropFrames != "" { if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) } if p.KeepFrames != "" { if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) } } p.Prune(drop, keep) } return nil } // PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. // // Please see the example below to understand this method as well as // the difference from Prune method. // // A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. // // PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. // Prune(A, nil) returns [B,C,B,D] by removing A itself. // // PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. // Prune(B, nil) returns [D] because a matching node is found by scanning from the root. func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { pruneBeneath := make(map[uint64]bool) for _, loc := range p.Location { for i := 0; i < len(loc.Line); i++ { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { funcName := simplifyFunc(fn.Name) if dropRx.MatchString(funcName) { // Found matching entry to prune. pruneBeneath[loc.ID] = true loc.Line = loc.Line[i:] break } } } } // Prune locs from each Sample for _, sample := range p.Sample { // Scan from the bottom leaf to the root to find the prune location. for i, loc := range sample.Location { if pruneBeneath[loc.ID] { sample.Location = sample.Location[i:] break } } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/pprof/profile/legacy_java_profile.go
vendor/github.com/google/pprof/profile/legacy_java_profile.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file implements parsers to convert java legacy profiles into // the profile.proto format. package profile import ( "bytes" "fmt" "io" "path/filepath" "regexp" "strconv" "strings" ) var ( attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) ) // javaCPUProfile returns a new Profile from profilez data. // b is the profile bytes after the header, period is the profiling // period, and parse is a function to parse 8-byte chunks from the // profile in its native endianness. func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { p := &Profile{ Period: period * 1000, PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, } var err error var locs map[uint64]*Location if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { return nil, err } if err = parseJavaLocations(b, locs, p); err != nil { return nil, err } // Strip out addresses for better merge. 
if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } return p, nil } // parseJavaProfile returns a new profile from heapz or contentionz // data. b is the profile bytes after the header. func parseJavaProfile(b []byte) (*Profile, error) { h := bytes.SplitAfterN(b, []byte("\n"), 2) if len(h) < 2 { return nil, errUnrecognized } p := &Profile{ PeriodType: &ValueType{}, } header := string(bytes.TrimSpace(h[0])) var err error var pType string switch header { case "--- heapz 1 ---": pType = "heap" case "--- contentionz 1 ---": pType = "contention" default: return nil, errUnrecognized } if b, err = parseJavaHeader(pType, h[1], p); err != nil { return nil, err } var locs map[uint64]*Location if b, locs, err = parseJavaSamples(pType, b, p); err != nil { return nil, err } if err = parseJavaLocations(b, locs, p); err != nil { return nil, err } // Strip out addresses for better merge. if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } return p, nil } // parseJavaHeader parses the attribute section on a java profile and // populates a profile. Returns the remainder of the buffer after all // attributes. func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { nextNewLine := bytes.IndexByte(b, byte('\n')) for nextNewLine != -1 { line := string(bytes.TrimSpace(b[0:nextNewLine])) if line != "" { h := attributeRx.FindStringSubmatch(line) if h == nil { // Not a valid attribute, exit. 
return b, nil } attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) var err error switch pType + "/" + attribute { case "heap/format", "cpu/format", "contention/format": if value != "java" { return nil, errUnrecognized } case "heap/resolution": p.SampleType = []*ValueType{ {Type: "inuse_objects", Unit: "count"}, {Type: "inuse_space", Unit: value}, } case "contention/resolution": p.SampleType = []*ValueType{ {Type: "contentions", Unit: "count"}, {Type: "delay", Unit: value}, } case "contention/sampling period": p.PeriodType = &ValueType{ Type: "contentions", Unit: "count", } if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) } case "contention/ms since reset": millis, err := strconv.ParseInt(value, 0, 64) if err != nil { return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) } p.DurationNanos = millis * 1000 * 1000 default: return nil, errUnrecognized } } // Grab next line. b = b[nextNewLine+1:] nextNewLine = bytes.IndexByte(b, byte('\n')) } return b, nil } // parseJavaSamples parses the samples from a java profile and // populates the Samples in a profile. Returns the remainder of the // buffer after the samples. func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { nextNewLine := bytes.IndexByte(b, byte('\n')) locs := make(map[uint64]*Location) for nextNewLine != -1 { line := string(bytes.TrimSpace(b[0:nextNewLine])) if line != "" { sample := javaSampleRx.FindStringSubmatch(line) if sample == nil { // Not a valid sample, exit. return b, locs, nil } // Java profiles have data/fields inverted compared to other // profile types. 
var err error value1, value2, value3 := sample[2], sample[1], sample[3] addrs, err := parseHexAddresses(value3) if err != nil { return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) } var sloc []*Location for _, addr := range addrs { loc := locs[addr] if locs[addr] == nil { loc = &Location{ Address: addr, } p.Location = append(p.Location, loc) locs[addr] = loc } sloc = append(sloc, loc) } s := &Sample{ Value: make([]int64, 2), Location: sloc, } if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) } if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) } switch pType { case "heap": const javaHeapzSamplingRate = 524288 // 512K if s.Value[0] == 0 { return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) } s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) case "contention": if period := p.Period; period != 0 { s.Value[0] = s.Value[0] * p.Period s.Value[1] = s.Value[1] * p.Period } } p.Sample = append(p.Sample, s) } // Grab next line. b = b[nextNewLine+1:] nextNewLine = bytes.IndexByte(b, byte('\n')) } return b, locs, nil } // parseJavaLocations parses the location information in a java // profile and populates the Locations in a profile. It uses the // location addresses from the profile as both the ID of each // location. 
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
	r := bytes.NewBuffer(b)
	// fns deduplicates Functions by name across locations.
	fns := make(map[string]*Function)
	for {
		line, err := r.ReadString('\n')
		if err != nil {
			if err != io.EOF {
				return err
			}
			// EOF with a non-empty final line: process it one last time.
			if line == "" {
				break
			}
		}
		if line = strings.TrimSpace(line); line == "" {
			continue
		}
		jloc := javaLocationRx.FindStringSubmatch(line)
		if len(jloc) != 3 {
			continue
		}
		// jloc[1] is the hex address, jloc[2] the symbol/file description.
		addr, err := strconv.ParseUint(jloc[1], 16, 64)
		if err != nil {
			return fmt.Errorf("parsing sample %s: %v", line, err)
		}
		loc := locs[addr]
		if loc == nil {
			// Unused/unseen
			continue
		}
		var lineFunc, lineFile string
		var lineNo int64
		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
			// Found a line of the form: "function (file:line)"
			lineFunc, lineFile = fileLine[1], fileLine[2]
			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
				lineNo = n
			}
		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
			// If there's not a file:line, it's a shared library path.
			// The path isn't interesting, so just give the .so.
			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
		} else if strings.Contains(jloc[2], "generated stub/JIT") {
			lineFunc = "STUB"
		} else {
			// Treat whole line as the function name. This is used by the
			// java agent for internal states such as "GC" or "VM".
			lineFunc = jloc[2]
		}
		fn := fns[lineFunc]
		if fn == nil {
			fn = &Function{
				Name:       lineFunc,
				SystemName: lineFunc,
				Filename:   lineFile,
			}
			fns[lineFunc] = fn
			p.Function = append(p.Function, fn)
		}
		loc.Line = []Line{
			{
				Function: fn,
				Line:     lineNo,
			},
		}
		// Addresses were only needed to join samples with symbols; clear them
		// now that symbol info is attached.
		loc.Address = 0
	}
	// Renumber IDs now that locations/functions have been rewritten.
	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/base.go
vendor/github.com/google/gnostic-models/jsonschema/base.go
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// THIS FILE IS AUTOMATICALLY GENERATED.

package jsonschema

import (
	"encoding/base64"
)

// baseSchemaBytes returns the raw JSON text of the embedded JSON Schema
// draft-04 meta-schema. The payload is stored base64-encoded; the embedded
// newlines are legal because encoding/base64 decoders ignore \r and \n.
// Do not edit the literal by hand — it is emitted by generate-base.go.
func baseSchemaBytes() ([]byte, error) {
	return base64.StdEncoding.DecodeString(
		`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/models.go
vendor/github.com/google/gnostic-models/jsonschema/models.go
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package jsonschema supports the reading, writing, and manipulation
// of JSON Schemas.
package jsonschema

import "gopkg.in/yaml.v3"

// The Schema struct models a JSON Schema and, because schemas are
// defined hierarchically, contains many references to itself.
// All fields are pointers and are nil if the associated values
// are not specified. Section numbers below refer to the JSON Schema
// draft-04 validation specification.
type Schema struct {
	Schema *string // $schema
	ID     *string // id keyword used for $ref resolution scope
	Ref    *string // $ref, i.e. JSON Pointers

	// http://json-schema.org/latest/json-schema-validation.html
	// 5.1. Validation keywords for numeric instances (number and integer)
	MultipleOf       *SchemaNumber
	Maximum          *SchemaNumber
	ExclusiveMaximum *bool
	Minimum          *SchemaNumber
	ExclusiveMinimum *bool

	// 5.2. Validation keywords for strings
	MaxLength *int64
	MinLength *int64
	Pattern   *string

	// 5.3. Validation keywords for arrays
	AdditionalItems *SchemaOrBoolean
	Items           *SchemaOrSchemaArray
	MaxItems        *int64
	MinItems        *int64
	UniqueItems     *bool

	// 5.4. Validation keywords for objects
	MaxProperties        *int64
	MinProperties        *int64
	Required             *[]string
	AdditionalProperties *SchemaOrBoolean
	// Slices of name/value pairs (rather than maps) preserve key order.
	Properties        *[]*NamedSchema
	PatternProperties *[]*NamedSchema
	Dependencies      *[]*NamedSchemaOrStringArray

	// 5.5. Validation keywords for any instance type
	Enumeration *[]SchemaEnumValue
	Type        *StringOrStringArray
	AllOf       *[]*Schema
	AnyOf       *[]*Schema
	OneOf       *[]*Schema
	Not         *Schema
	Definitions *[]*NamedSchema

	// 6. Metadata keywords
	Title       *string
	Description *string
	// Default keeps the raw yaml node because a default may be any JSON value.
	Default *yaml.Node

	// 7. Semantic validation with "format"
	Format *string
}

// These helper structs represent "combination" types that generally can
// have values of one type or another. All are used to represent parts
// of Schemas.

// SchemaNumber represents a value that can be either an Integer or a Float.
type SchemaNumber struct {
	Integer *int64
	Float   *float64
}

// NewSchemaNumberWithInteger creates and returns a new object.
// The argument is copied, so the stored pointer does not alias caller state.
func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
	result := &SchemaNumber{}
	result.Integer = &i
	return result
}

// NewSchemaNumberWithFloat creates and returns a new object
func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
	result := &SchemaNumber{}
	result.Float = &f
	return result
}

// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
type SchemaOrBoolean struct {
	Schema  *Schema
	Boolean *bool
}

// NewSchemaOrBooleanWithSchema creates and returns a new object
func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
	result := &SchemaOrBoolean{}
	result.Schema = s
	return result
}

// NewSchemaOrBooleanWithBoolean creates and returns a new object
func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
	result := &SchemaOrBoolean{}
	result.Boolean = &b
	return result
}

// StringOrStringArray represents a value that can be either
// a String or an Array of Strings.
type StringOrStringArray struct { String *string StringArray *[]string } // NewStringOrStringArrayWithString creates and returns a new object func NewStringOrStringArrayWithString(s string) *StringOrStringArray { result := &StringOrStringArray{} result.String = &s return result } // NewStringOrStringArrayWithStringArray creates and returns a new object func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { result := &StringOrStringArray{} result.StringArray = &a return result } // SchemaOrStringArray represents a value that can be either // a Schema or an Array of Strings. type SchemaOrStringArray struct { Schema *Schema StringArray *[]string } // SchemaOrSchemaArray represents a value that can be either // a Schema or an Array of Schemas. type SchemaOrSchemaArray struct { Schema *Schema SchemaArray *[]*Schema } // NewSchemaOrSchemaArrayWithSchema creates and returns a new object func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { result := &SchemaOrSchemaArray{} result.Schema = s return result } // NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { result := &SchemaOrSchemaArray{} result.SchemaArray = &a return result } // SchemaEnumValue represents a value that can be part of an // enumeration in a Schema. type SchemaEnumValue struct { String *string Bool *bool } // NamedSchema is a name-value pair that is used to emulate maps // with ordered keys. type NamedSchema struct { Name string Value *Schema } // NewNamedSchema creates and returns a new object func NewNamedSchema(name string, value *Schema) *NamedSchema { return &NamedSchema{Name: name, Value: value} } // NamedSchemaOrStringArray is a name-value pair that is used // to emulate maps with ordered keys. 
type NamedSchemaOrStringArray struct { Name string Value *SchemaOrStringArray } // Access named subschemas by name func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { if array == nil { return nil } for _, pair := range *array { if pair.Name == name { return pair.Value } } return nil } // PropertyWithName returns the selected element. func (s *Schema) PropertyWithName(name string) *Schema { return namedSchemaArrayElementWithName(s.Properties, name) } // PatternPropertyWithName returns the selected element. func (s *Schema) PatternPropertyWithName(name string) *Schema { return namedSchemaArrayElementWithName(s.PatternProperties, name) } // DefinitionWithName returns the selected element. func (s *Schema) DefinitionWithName(name string) *Schema { return namedSchemaArrayElementWithName(s.Definitions, name) } // AddProperty adds a named property. func (s *Schema) AddProperty(name string, property *Schema) { *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/writer.go
vendor/github.com/google/gnostic-models/jsonschema/writer.go
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jsonschema

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// indentation is the unit of indent added per nesting level when
// rendering JSON text.
const indentation = "  "

// renderMappingNode renders a yaml mapping node as a JSON object,
// with indent as the current leading indentation.
// NOTE(review): scalar values are emitted between bare quotes without
// escaping, so values containing quotes/backslashes would produce
// invalid JSON — acceptable for the schema text this package renders.
func renderMappingNode(node *yaml.Node, indent string) (result string) {
	result = "{\n"
	innerIndent := indent + indentation
	// Mapping content alternates key and value nodes.
	for i := 0; i < len(node.Content); i += 2 {
		// first print the key
		key := node.Content[i].Value
		result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
		// then the value
		value := node.Content[i+1]
		switch value.Kind {
		case yaml.ScalarNode:
			result += "\"" + value.Value + "\""
		case yaml.MappingNode:
			result += renderMappingNode(value, innerIndent)
		case yaml.SequenceNode:
			result += renderSequenceNode(value, innerIndent)
		default:
			result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
		}
		// Comma after every pair except the last.
		if i < len(node.Content)-2 {
			result += ","
		}
		result += "\n"
	}
	result += indent + "}"
	return result
}

// renderSequenceNode renders a yaml sequence node as a JSON array.
// NOTE(review): nested sequences are not handled here and would fall
// through to the ???ArrayItem placeholder.
func renderSequenceNode(node *yaml.Node, indent string) (result string) {
	result = "[\n"
	innerIndent := indent + indentation
	for i := 0; i < len(node.Content); i++ {
		item := node.Content[i]
		switch item.Kind {
		case yaml.ScalarNode:
			result += innerIndent + "\"" + item.Value + "\""
		case yaml.MappingNode:
			// The trailing + "" is a no-op kept for byte fidelity.
			result += innerIndent + renderMappingNode(item, innerIndent) + ""
		default:
			result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
		}
		if i < len(node.Content)-1 {
			result += ","
		}
		result += "\n"
	}
	result += indent + "]"
	return result
}

// renderStringArray renders a slice of strings as a JSON array of strings.
func renderStringArray(array []string, indent string) (result string) {
	result = "[\n"
	innerIndent := indent + indentation
	for i, item := range array {
		result += innerIndent + "\"" + item + "\""
		if i < len(array)-1 {
			result += ","
		}
		result += "\n"
	}
	result += indent + "]"
	return result
}

// Render renders a yaml.Node as JSON
func Render(node *yaml.Node) string {
	if node.Kind == yaml.DocumentNode {
		if len(node.Content) == 1 {
			// Unwrap the document and render its single root value.
			return Render(node.Content[0])
		}
	} else if node.Kind == yaml.MappingNode {
		return renderMappingNode(node, "") + "\n"
	} else if node.Kind == yaml.SequenceNode {
		return renderSequenceNode(node, "") + "\n"
	}
	// Unsupported node kinds render as the empty string.
	return ""
}

// nodeValue returns the yaml node for whichever variant is set, or nil
// when neither is.
func (object *SchemaNumber) nodeValue() *yaml.Node {
	if object.Integer != nil {
		return nodeForInt64(*object.Integer)
	} else if object.Float != nil {
		return nodeForFloat64(*object.Float)
	} else {
		return nil
	}
}

// nodeValue returns the yaml node for whichever variant is set, or nil
// when neither is.
func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
	if object.Schema != nil {
		return object.Schema.nodeValue()
	} else if object.Boolean != nil {
		return nodeForBoolean(*object.Boolean)
	} else {
		return nil
	}
}

// nodeForStringArray builds a sequence node of string scalars.
func nodeForStringArray(array []string) *yaml.Node {
	content := make([]*yaml.Node, 0)
	for _, item := range array {
		content = append(content, nodeForString(item))
	}
	return nodeForSequence(content)
}

// nodeForSchemaArray builds a sequence node of schema mappings.
func nodeForSchemaArray(array []*Schema) *yaml.Node {
	content := make([]*yaml.Node, 0)
	for _, item := range array {
		content = append(content, item.nodeValue())
	}
	return nodeForSequence(content)
}

// nodeValue returns the yaml node for whichever variant is set, or nil.
func (object *StringOrStringArray) nodeValue() *yaml.Node {
	if object.String != nil {
		return nodeForString(*object.String)
	} else if object.StringArray != nil {
		return nodeForStringArray(*(object.StringArray))
	} else {
		return nil
	}
}

// nodeValue returns the yaml node for whichever variant is set, or nil.
func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
	if object.Schema != nil {
		return object.Schema.nodeValue()
	} else if object.StringArray != nil {
		return nodeForStringArray(*(object.StringArray))
	} else {
		return nil
	}
}

// nodeValue returns the yaml node for whichever variant is set, or nil.
func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
	if object.Schema != nil {
		return object.Schema.nodeValue()
	} else if object.SchemaArray != nil {
		return nodeForSchemaArray(*(object.SchemaArray))
	} else {
		return nil
	}
}

// nodeValue returns the yaml node for whichever variant is set, or nil.
func (object *SchemaEnumValue) nodeValue() *yaml.Node {
	if object.String != nil {
		return nodeForString(*object.String)
	} else if object.Bool != nil {
		return nodeForBoolean(*object.Bool)
	} else {
		return nil
	}
}

// nodeForNamedSchemaArray builds a mapping node from ordered name/schema pairs.
func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
	content := make([]*yaml.Node, 0)
	for _, pair := range *(array) {
		content = appendPair(content, pair.Name, pair.Value.nodeValue())
	}
	return nodeForMapping(content)
}

// nodeForNamedSchemaOrStringArray builds a mapping node from ordered
// name/(schema-or-strings) pairs.
func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
	content := make([]*yaml.Node, 0)
	for _, pair := range *(array) {
		content = appendPair(content, pair.Name, pair.Value.nodeValue())
	}
	return nodeForMapping(content)
}

// nodeForSchemaEnumArray builds a sequence node of enum values.
func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
	content := make([]*yaml.Node, 0)
	for _, item := range *array {
		content = append(content, item.nodeValue())
	}
	return nodeForSequence(content)
}

// nodeForMapping wraps pre-built alternating key/value nodes in a mapping.
func nodeForMapping(content []*yaml.Node) *yaml.Node {
	return &yaml.Node{
		Kind:    yaml.MappingNode,
		Content: content,
	}
}

// nodeForSequence wraps pre-built item nodes in a sequence.
func nodeForSequence(content []*yaml.Node) *yaml.Node {
	return &yaml.Node{
		Kind:    yaml.SequenceNode,
		Content: content,
	}
}

// nodeForString builds a tagged string scalar node.
func nodeForString(value string) *yaml.Node {
	return &yaml.Node{
		Kind:  yaml.ScalarNode,
		Tag:   "!!str",
		Value: value,
	}
}

// nodeForBoolean builds a tagged bool scalar node.
func nodeForBoolean(value bool) *yaml.Node {
	return &yaml.Node{
		Kind:  yaml.ScalarNode,
		Tag:   "!!bool",
		Value: fmt.Sprintf("%t", value),
	}
}

// nodeForInt64 builds a tagged int scalar node.
func nodeForInt64(value int64) *yaml.Node {
	return &yaml.Node{
		Kind:  yaml.ScalarNode,
		Tag:   "!!int",
		Value: fmt.Sprintf("%d", value),
	}
}

// nodeForFloat64 builds a tagged float scalar node.
func nodeForFloat64(value float64) *yaml.Node {
	return &yaml.Node{
		Kind:  yaml.ScalarNode,
		Tag:   "!!float",
		Value: fmt.Sprintf("%f", value),
	}
}

// appendPair appends a key node and its value node to a mapping's content.
func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
	nodes = append(nodes, nodeForString(name))
	nodes = append(nodes, value)
	return nodes
}

// nodeValue converts a Schema to a yaml mapping node, emitting only the
// fields that are set, in a fixed canonical key order.
func (schema *Schema) nodeValue() *yaml.Node {
	n := &yaml.Node{Kind: yaml.MappingNode}
	content := make([]*yaml.Node, 0)
	if schema.Title != nil {
		content = appendPair(content, "title", nodeForString(*schema.Title))
	}
	if schema.ID != nil {
		content = appendPair(content, "id", nodeForString(*schema.ID))
	}
	if schema.Schema != nil {
		content = appendPair(content, "$schema", nodeForString(*schema.Schema))
	}
	if schema.Type != nil {
		content = appendPair(content, "type", schema.Type.nodeValue())
	}
	if schema.Items != nil {
		content = appendPair(content, "items", schema.Items.nodeValue())
	}
	if schema.Description != nil {
		content = appendPair(content, "description", nodeForString(*schema.Description))
	}
	if schema.Required != nil {
		content = appendPair(content, "required", nodeForStringArray(*schema.Required))
	}
	if schema.AdditionalProperties != nil {
		content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
	}
	if schema.PatternProperties != nil {
		content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
	}
	if schema.Properties != nil {
		content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
	}
	if schema.Dependencies != nil {
		content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
	}
	if schema.Ref != nil {
		content = appendPair(content, "$ref", nodeForString(*schema.Ref))
	}
	if schema.MultipleOf != nil {
		content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
	}
	if schema.Maximum != nil {
		content = appendPair(content, "maximum", schema.Maximum.nodeValue())
	}
	if schema.ExclusiveMaximum != nil {
		content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
	}
	if schema.Minimum != nil {
		content = appendPair(content, "minimum", schema.Minimum.nodeValue())
	}
	if schema.ExclusiveMinimum != nil {
		content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
	}
	if schema.MaxLength != nil {
		content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
	}
	if schema.MinLength != nil {
		content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
	}
	if schema.Pattern != nil {
		content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
	}
	if schema.AdditionalItems != nil {
		content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
	}
	if schema.MaxItems != nil {
		content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
	}
	if schema.MinItems != nil {
		content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
	}
	if schema.UniqueItems != nil {
		content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
	}
	if schema.MaxProperties != nil {
		content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
	}
	if schema.MinProperties != nil {
		content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
	}
	if schema.Enumeration != nil {
		content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
	}
	if schema.AllOf != nil {
		content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
	}
	if schema.AnyOf != nil {
		content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
	}
	if schema.OneOf != nil {
		content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
	}
	if schema.Not != nil {
		content = appendPair(content, "not", schema.Not.nodeValue())
	}
	if schema.Definitions != nil {
		content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
	}
	if schema.Default != nil {
		// Default values are intentionally not rendered (legacy behavior):
		// m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default})
	}
	if schema.Format != nil {
		content = appendPair(content, "format", nodeForString(*schema.Format))
	}
	n.Content = content
	return n
}

// JSONString returns a json representation of a schema.
func (schema *Schema) JSONString() string {
	node := schema.nodeValue()
	return Render(node)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/reader.go
vendor/github.com/google/gnostic-models/jsonschema/reader.go
// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:generate go run generate-base.go package jsonschema import ( "fmt" "io/ioutil" "strconv" "gopkg.in/yaml.v3" ) // This is a global map of all known Schemas. // It is initialized when the first Schema is created and inserted. var schemas map[string]*Schema // NewBaseSchema builds a schema object from an embedded json representation. func NewBaseSchema() (schema *Schema, err error) { b, err := baseSchemaBytes() if err != nil { return nil, err } var node yaml.Node err = yaml.Unmarshal(b, &node) if err != nil { return nil, err } return NewSchemaFromObject(&node), nil } // NewSchemaFromFile reads a schema from a file. // Currently this assumes that schemas are stored in the source distribution of this project. func NewSchemaFromFile(filename string) (schema *Schema, err error) { file, err := ioutil.ReadFile(filename) if err != nil { return nil, err } var node yaml.Node err = yaml.Unmarshal(file, &node) if err != nil { return nil, err } return NewSchemaFromObject(&node), nil } // NewSchemaFromObject constructs a schema from a parsed JSON object. // Due to the complexity of the schema representation, this is a // custom reader and not the standard Go JSON reader (encoding/json). 
func NewSchemaFromObject(jsonData *yaml.Node) *Schema { switch jsonData.Kind { case yaml.DocumentNode: return NewSchemaFromObject(jsonData.Content[0]) case yaml.MappingNode: schema := &Schema{} for i := 0; i < len(jsonData.Content); i += 2 { k := jsonData.Content[i].Value v := jsonData.Content[i+1] switch k { case "$schema": schema.Schema = schema.stringValue(v) case "id": schema.ID = schema.stringValue(v) case "multipleOf": schema.MultipleOf = schema.numberValue(v) case "maximum": schema.Maximum = schema.numberValue(v) case "exclusiveMaximum": schema.ExclusiveMaximum = schema.boolValue(v) case "minimum": schema.Minimum = schema.numberValue(v) case "exclusiveMinimum": schema.ExclusiveMinimum = schema.boolValue(v) case "maxLength": schema.MaxLength = schema.intValue(v) case "minLength": schema.MinLength = schema.intValue(v) case "pattern": schema.Pattern = schema.stringValue(v) case "additionalItems": schema.AdditionalItems = schema.schemaOrBooleanValue(v) case "items": schema.Items = schema.schemaOrSchemaArrayValue(v) case "maxItems": schema.MaxItems = schema.intValue(v) case "minItems": schema.MinItems = schema.intValue(v) case "uniqueItems": schema.UniqueItems = schema.boolValue(v) case "maxProperties": schema.MaxProperties = schema.intValue(v) case "minProperties": schema.MinProperties = schema.intValue(v) case "required": schema.Required = schema.arrayOfStringsValue(v) case "additionalProperties": schema.AdditionalProperties = schema.schemaOrBooleanValue(v) case "properties": schema.Properties = schema.mapOfSchemasValue(v) case "patternProperties": schema.PatternProperties = schema.mapOfSchemasValue(v) case "dependencies": schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) case "enum": schema.Enumeration = schema.arrayOfEnumValuesValue(v) case "type": schema.Type = schema.stringOrStringArrayValue(v) case "allOf": schema.AllOf = schema.arrayOfSchemasValue(v) case "anyOf": schema.AnyOf = schema.arrayOfSchemasValue(v) case "oneOf": schema.OneOf = 
schema.arrayOfSchemasValue(v) case "not": schema.Not = NewSchemaFromObject(v) case "definitions": schema.Definitions = schema.mapOfSchemasValue(v) case "title": schema.Title = schema.stringValue(v) case "description": schema.Description = schema.stringValue(v) case "default": schema.Default = v case "format": schema.Format = schema.stringValue(v) case "$ref": schema.Ref = schema.stringValue(v) default: fmt.Printf("UNSUPPORTED (%s)\n", k) } } // insert schema in global map if schema.ID != nil { if schemas == nil { schemas = make(map[string]*Schema, 0) } schemas[*(schema.ID)] = schema } return schema default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) return nil } return nil } // // BUILDERS // The following methods build elements of Schemas from interface{} values. // Each returns nil if it is unable to build the desired element. // // Gets the string value of an interface{} value if possible. func (schema *Schema) stringValue(v *yaml.Node) *string { switch v.Kind { case yaml.ScalarNode: return &v.Value default: fmt.Printf("stringValue: unexpected node %+v\n", v) } return nil } // Gets the numeric value of an interface{} value if possible. func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { number := &SchemaNumber{} switch v.Kind { case yaml.ScalarNode: switch v.Tag { case "!!float": v2, _ := strconv.ParseFloat(v.Value, 64) number.Float = &v2 return number case "!!int": v2, _ := strconv.ParseInt(v.Value, 10, 64) number.Integer = &v2 return number default: fmt.Printf("stringValue: unexpected node %+v\n", v) } default: fmt.Printf("stringValue: unexpected node %+v\n", v) } return nil } // Gets the integer value of an interface{} value if possible. 
func (schema *Schema) intValue(v *yaml.Node) *int64 { switch v.Kind { case yaml.ScalarNode: switch v.Tag { case "!!float": v2, _ := strconv.ParseFloat(v.Value, 64) v3 := int64(v2) return &v3 case "!!int": v2, _ := strconv.ParseInt(v.Value, 10, 64) return &v2 default: fmt.Printf("intValue: unexpected node %+v\n", v) } default: fmt.Printf("intValue: unexpected node %+v\n", v) } return nil } // Gets the bool value of an interface{} value if possible. func (schema *Schema) boolValue(v *yaml.Node) *bool { switch v.Kind { case yaml.ScalarNode: switch v.Tag { case "!!bool": v2, _ := strconv.ParseBool(v.Value) return &v2 default: fmt.Printf("boolValue: unexpected node %+v\n", v) } default: fmt.Printf("boolValue: unexpected node %+v\n", v) } return nil } // Gets a map of Schemas from an interface{} value if possible. func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { switch v.Kind { case yaml.MappingNode: m := make([]*NamedSchema, 0) for i := 0; i < len(v.Content); i += 2 { k2 := v.Content[i].Value v2 := v.Content[i+1] pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} m = append(m, pair) } return &m default: fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) } return nil } // Gets an array of Schemas from an interface{} value if possible. func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { switch v.Kind { case yaml.SequenceNode: m := make([]*Schema, 0) for _, v2 := range v.Content { switch v2.Kind { case yaml.MappingNode: s := NewSchemaFromObject(v2) m = append(m, s) default: fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) } } return &m case yaml.MappingNode: m := make([]*Schema, 0) s := NewSchemaFromObject(v) m = append(m, s) return &m default: fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) } return nil } // Gets a Schema or an array of Schemas from an interface{} value if possible. 
func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { switch v.Kind { case yaml.SequenceNode: m := make([]*Schema, 0) for _, v2 := range v.Content { switch v2.Kind { case yaml.MappingNode: s := NewSchemaFromObject(v2) m = append(m, s) default: fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) } } return &SchemaOrSchemaArray{SchemaArray: &m} case yaml.MappingNode: s := NewSchemaFromObject(v) return &SchemaOrSchemaArray{Schema: s} default: fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) } return nil } // Gets an array of strings from an interface{} value if possible. func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { switch v.Kind { case yaml.ScalarNode: a := []string{v.Value} return &a case yaml.SequenceNode: a := make([]string, 0) for _, v2 := range v.Content { switch v2.Kind { case yaml.ScalarNode: a = append(a, v2.Value) default: fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) } } return &a default: fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) } return nil } // Gets a string or an array of strings from an interface{} value if possible. func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { switch v.Kind { case yaml.ScalarNode: s := &StringOrStringArray{} s.String = &v.Value return s case yaml.SequenceNode: a := make([]string, 0) for _, v2 := range v.Content { switch v2.Kind { case yaml.ScalarNode: a = append(a, v2.Value) default: fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) } } s := &StringOrStringArray{} s.StringArray = &a return s default: fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) } return nil } // Gets an array of enum values from an interface{} value if possible. 
func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { a := make([]SchemaEnumValue, 0) switch v.Kind { case yaml.SequenceNode: for _, v2 := range v.Content { switch v2.Kind { case yaml.ScalarNode: switch v2.Tag { case "!!str": a = append(a, SchemaEnumValue{String: &v2.Value}) case "!!bool": v3, _ := strconv.ParseBool(v2.Value) a = append(a, SchemaEnumValue{Bool: &v3}) default: fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) } default: fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) } } default: fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) } return &a } // Gets a map of schemas or string arrays from an interface{} value if possible. func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { m := make([]*NamedSchemaOrStringArray, 0) switch v.Kind { case yaml.MappingNode: for i := 0; i < len(v.Content); i += 2 { k2 := v.Content[i].Value v2 := v.Content[i+1] switch v2.Kind { case yaml.SequenceNode: a := make([]string, 0) for _, v3 := range v2.Content { switch v3.Kind { case yaml.ScalarNode: a = append(a, v3.Value) default: fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) } } s := &SchemaOrStringArray{} s.StringArray = &a pair := &NamedSchemaOrStringArray{Name: k2, Value: s} m = append(m, pair) default: fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) } } default: fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) } return &m } // Gets a schema or a boolean value from an interface{} value if possible. func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { schemaOrBoolean := &SchemaOrBoolean{} switch v.Kind { case yaml.ScalarNode: v2, _ := strconv.ParseBool(v.Value) schemaOrBoolean.Boolean = &v2 case yaml.MappingNode: schemaOrBoolean.Schema = NewSchemaFromObject(v) default: fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) } return schemaOrBoolean }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/operations.go
vendor/github.com/google/gnostic-models/jsonschema/operations.go
// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jsonschema import ( "fmt" "log" "strings" ) // // OPERATIONS // The following methods perform operations on Schemas. // // IsEmpty returns true if no members of the Schema are specified. func (schema *Schema) IsEmpty() bool { return (schema.Schema == nil) && (schema.ID == nil) && (schema.MultipleOf == nil) && (schema.Maximum == nil) && (schema.ExclusiveMaximum == nil) && (schema.Minimum == nil) && (schema.ExclusiveMinimum == nil) && (schema.MaxLength == nil) && (schema.MinLength == nil) && (schema.Pattern == nil) && (schema.AdditionalItems == nil) && (schema.Items == nil) && (schema.MaxItems == nil) && (schema.MinItems == nil) && (schema.UniqueItems == nil) && (schema.MaxProperties == nil) && (schema.MinProperties == nil) && (schema.Required == nil) && (schema.AdditionalProperties == nil) && (schema.Properties == nil) && (schema.PatternProperties == nil) && (schema.Dependencies == nil) && (schema.Enumeration == nil) && (schema.Type == nil) && (schema.AllOf == nil) && (schema.AnyOf == nil) && (schema.OneOf == nil) && (schema.Not == nil) && (schema.Definitions == nil) && (schema.Title == nil) && (schema.Description == nil) && (schema.Default == nil) && (schema.Format == nil) && (schema.Ref == nil) } // IsEqual returns true if two schemas are equal. 
func (schema *Schema) IsEqual(schema2 *Schema) bool { return schema.String() == schema2.String() } // SchemaOperation represents a function that can be applied to a Schema. type SchemaOperation func(schema *Schema, context string) // Applies a specified function to a Schema and all of the Schemas that it contains. func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { if schema.AdditionalItems != nil { s := schema.AdditionalItems.Schema if s != nil { s.applyToSchemas(operation, "AdditionalItems") } } if schema.Items != nil { if schema.Items.SchemaArray != nil { for _, s := range *(schema.Items.SchemaArray) { s.applyToSchemas(operation, "Items.SchemaArray") } } else if schema.Items.Schema != nil { schema.Items.Schema.applyToSchemas(operation, "Items.Schema") } } if schema.AdditionalProperties != nil { s := schema.AdditionalProperties.Schema if s != nil { s.applyToSchemas(operation, "AdditionalProperties") } } if schema.Properties != nil { for _, pair := range *(schema.Properties) { s := pair.Value s.applyToSchemas(operation, "Properties") } } if schema.PatternProperties != nil { for _, pair := range *(schema.PatternProperties) { s := pair.Value s.applyToSchemas(operation, "PatternProperties") } } if schema.Dependencies != nil { for _, pair := range *(schema.Dependencies) { schemaOrStringArray := pair.Value s := schemaOrStringArray.Schema if s != nil { s.applyToSchemas(operation, "Dependencies") } } } if schema.AllOf != nil { for _, s := range *(schema.AllOf) { s.applyToSchemas(operation, "AllOf") } } if schema.AnyOf != nil { for _, s := range *(schema.AnyOf) { s.applyToSchemas(operation, "AnyOf") } } if schema.OneOf != nil { for _, s := range *(schema.OneOf) { s.applyToSchemas(operation, "OneOf") } } if schema.Not != nil { schema.Not.applyToSchemas(operation, "Not") } if schema.Definitions != nil { for _, pair := range *(schema.Definitions) { s := pair.Value s.applyToSchemas(operation, "Definitions") } } operation(schema, context) } // 
CopyProperties copies all non-nil properties from the source Schema to the schema Schema. func (schema *Schema) CopyProperties(source *Schema) { if source.Schema != nil { schema.Schema = source.Schema } if source.ID != nil { schema.ID = source.ID } if source.MultipleOf != nil { schema.MultipleOf = source.MultipleOf } if source.Maximum != nil { schema.Maximum = source.Maximum } if source.ExclusiveMaximum != nil { schema.ExclusiveMaximum = source.ExclusiveMaximum } if source.Minimum != nil { schema.Minimum = source.Minimum } if source.ExclusiveMinimum != nil { schema.ExclusiveMinimum = source.ExclusiveMinimum } if source.MaxLength != nil { schema.MaxLength = source.MaxLength } if source.MinLength != nil { schema.MinLength = source.MinLength } if source.Pattern != nil { schema.Pattern = source.Pattern } if source.AdditionalItems != nil { schema.AdditionalItems = source.AdditionalItems } if source.Items != nil { schema.Items = source.Items } if source.MaxItems != nil { schema.MaxItems = source.MaxItems } if source.MinItems != nil { schema.MinItems = source.MinItems } if source.UniqueItems != nil { schema.UniqueItems = source.UniqueItems } if source.MaxProperties != nil { schema.MaxProperties = source.MaxProperties } if source.MinProperties != nil { schema.MinProperties = source.MinProperties } if source.Required != nil { schema.Required = source.Required } if source.AdditionalProperties != nil { schema.AdditionalProperties = source.AdditionalProperties } if source.Properties != nil { schema.Properties = source.Properties } if source.PatternProperties != nil { schema.PatternProperties = source.PatternProperties } if source.Dependencies != nil { schema.Dependencies = source.Dependencies } if source.Enumeration != nil { schema.Enumeration = source.Enumeration } if source.Type != nil { schema.Type = source.Type } if source.AllOf != nil { schema.AllOf = source.AllOf } if source.AnyOf != nil { schema.AnyOf = source.AnyOf } if source.OneOf != nil { schema.OneOf = source.OneOf 
} if source.Not != nil { schema.Not = source.Not } if source.Definitions != nil { schema.Definitions = source.Definitions } if source.Title != nil { schema.Title = source.Title } if source.Description != nil { schema.Description = source.Description } if source.Default != nil { schema.Default = source.Default } if source.Format != nil { schema.Format = source.Format } if source.Ref != nil { schema.Ref = source.Ref } } // TypeIs returns true if the Type of a Schema includes the specified type func (schema *Schema) TypeIs(typeName string) bool { if schema.Type != nil { // the schema Type is either a string or an array of strings if schema.Type.String != nil { return (*(schema.Type.String) == typeName) } else if schema.Type.StringArray != nil { for _, n := range *(schema.Type.StringArray) { if n == typeName { return true } } } } return false } // ResolveRefs resolves "$ref" elements in a Schema and its children. // But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, // the reference is kept and we expect downstream tools to separately model these // referenced schemas. func (schema *Schema) ResolveRefs() { rootSchema := schema count := 1 for count > 0 { count = 0 schema.applyToSchemas( func(schema *Schema, context string) { if schema.Ref != nil { resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) if err != nil { log.Printf("%+v", err) } else if resolvedRef.TypeIs("object") { // don't substitute for objects, we'll model the referenced schema with a class } else if context == "OneOf" { // don't substitute for references inside oneOf declarations } else if resolvedRef.OneOf != nil { // don't substitute for references that contain oneOf declarations } else if resolvedRef.AdditionalProperties != nil { // don't substitute for references that look like objects } else { schema.Ref = nil schema.CopyProperties(resolvedRef) count++ } } }, "") } } // resolveJSONPointer resolves JSON pointers. 
// This current implementation is very crude and custom for OpenAPI 2.0 schemas. // It panics for any pointer that it is unable to resolve. func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { parts := strings.Split(ref, "#") if len(parts) == 2 { documentName := parts[0] + "#" if documentName == "#" && schema.ID != nil { documentName = *(schema.ID) } path := parts[1] document := schemas[documentName] pathParts := strings.Split(path, "/") // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases if len(pathParts) == 1 { return document, nil } else if len(pathParts) == 3 { switch pathParts[1] { case "definitions": dictionary := document.Definitions for _, pair := range *dictionary { if pair.Name == pathParts[2] { result = pair.Value } } case "properties": dictionary := document.Properties for _, pair := range *dictionary { if pair.Name == pathParts[2] { result = pair.Value } } default: break } } } if result == nil { return nil, fmt.Errorf("unresolved pointer: %+v", ref) } return result, nil } // ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. func (schema *Schema) ResolveAllOfs() { schema.applyToSchemas( func(schema *Schema, context string) { if schema.AllOf != nil { for _, allOf := range *(schema.AllOf) { schema.CopyProperties(allOf) } schema.AllOf = nil } }, "resolveAllOfs") } // ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
func (schema *Schema) ResolveAnyOfs() { schema.applyToSchemas( func(schema *Schema, context string) { if schema.AnyOf != nil { schema.OneOf = schema.AnyOf schema.AnyOf = nil } }, "resolveAnyOfs") } // return a pointer to a copy of a passed-in string func stringptr(input string) (output *string) { return &input } // CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition func (schema *Schema) CopyOfficialSchemaProperty(name string) { *schema.Properties = append(*schema.Properties, NewNamedSchema(name, &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) } // CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition func (schema *Schema) CopyOfficialSchemaProperties(names []string) { for _, name := range names { schema.CopyOfficialSchemaProperty(name) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/jsonschema/display.go
vendor/github.com/google/gnostic-models/jsonschema/display.go
// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package jsonschema import ( "fmt" "strings" ) // // DISPLAY // The following methods display Schemas. // // Description returns a string representation of a string or string array. func (s *StringOrStringArray) Description() string { if s.String != nil { return *s.String } if s.StringArray != nil { return strings.Join(*s.StringArray, ", ") } return "" } // Returns a string representation of a Schema. func (schema *Schema) String() string { return schema.describeSchema("") } // Helper: Returns a string representation of a Schema indented by a specified string. 
func (schema *Schema) describeSchema(indent string) string { result := "" if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } if schema.ID != nil { result += indent + "id: " + *(schema.ID) + "\n" } if schema.MultipleOf != nil { result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) } if schema.Maximum != nil { result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) } if schema.ExclusiveMaximum != nil { result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) } if schema.Minimum != nil { result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) } if schema.ExclusiveMinimum != nil { result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) } if schema.MaxLength != nil { result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) } if schema.MinLength != nil { result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) } if schema.Pattern != nil { result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) } if schema.AdditionalItems != nil { s := schema.AdditionalItems.Schema if s != nil { result += indent + "additionalItems:\n" result += s.describeSchema(indent + " ") } else { b := *(schema.AdditionalItems.Boolean) result += indent + fmt.Sprintf("additionalItems: %+v\n", b) } } if schema.Items != nil { result += indent + "items:\n" items := schema.Items if items.SchemaArray != nil { for i, s := range *(items.SchemaArray) { result += indent + " " + fmt.Sprintf("%d", i) + ":\n" result += s.describeSchema(indent + " " + " ") } } else if items.Schema != nil { result += items.Schema.describeSchema(indent + " " + " ") } } if schema.MaxItems != nil { result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) } if schema.MinItems != nil { result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) } if schema.UniqueItems != nil { result += indent + fmt.Sprintf("uniqueItems: %+v\n", 
*(schema.UniqueItems)) } if schema.MaxProperties != nil { result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) } if schema.MinProperties != nil { result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) } if schema.Required != nil { result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) } if schema.AdditionalProperties != nil { s := schema.AdditionalProperties.Schema if s != nil { result += indent + "additionalProperties:\n" result += s.describeSchema(indent + " ") } else { b := *(schema.AdditionalProperties.Boolean) result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) } } if schema.Properties != nil { result += indent + "properties:\n" for _, pair := range *(schema.Properties) { name := pair.Name s := pair.Value result += indent + " " + name + ":\n" result += s.describeSchema(indent + " " + " ") } } if schema.PatternProperties != nil { result += indent + "patternProperties:\n" for _, pair := range *(schema.PatternProperties) { name := pair.Name s := pair.Value result += indent + " " + name + ":\n" result += s.describeSchema(indent + " " + " ") } } if schema.Dependencies != nil { result += indent + "dependencies:\n" for _, pair := range *(schema.Dependencies) { name := pair.Name schemaOrStringArray := pair.Value s := schemaOrStringArray.Schema if s != nil { result += indent + " " + name + ":\n" result += s.describeSchema(indent + " " + " ") } else { a := schemaOrStringArray.StringArray if a != nil { result += indent + " " + name + ":\n" for _, s2 := range *a { result += indent + " " + " " + s2 + "\n" } } } } } if schema.Enumeration != nil { result += indent + "enumeration:\n" for _, value := range *(schema.Enumeration) { if value.String != nil { result += indent + " " + fmt.Sprintf("%+v\n", *value.String) } else { result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) } } } if schema.Type != nil { result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) } if 
schema.AllOf != nil { result += indent + "allOf:\n" for _, s := range *(schema.AllOf) { result += s.describeSchema(indent + " ") result += indent + "-\n" } } if schema.AnyOf != nil { result += indent + "anyOf:\n" for _, s := range *(schema.AnyOf) { result += s.describeSchema(indent + " ") result += indent + "-\n" } } if schema.OneOf != nil { result += indent + "oneOf:\n" for _, s := range *(schema.OneOf) { result += s.describeSchema(indent + " ") result += indent + "-\n" } } if schema.Not != nil { result += indent + "not:\n" result += schema.Not.describeSchema(indent + " ") } if schema.Definitions != nil { result += indent + "definitions:\n" for _, pair := range *(schema.Definitions) { name := pair.Name s := pair.Value result += indent + " " + name + ":\n" result += s.describeSchema(indent + " " + " ") } } if schema.Title != nil { result += indent + "title: " + *(schema.Title) + "\n" } if schema.Description != nil { result += indent + "description: " + *(schema.Description) + "\n" } if schema.Default != nil { result += indent + "default:\n" result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) } if schema.Format != nil { result += indent + "format: " + *(schema.Format) + "\n" } if schema.Ref != nil { result += indent + "$ref: " + *(schema.Ref) + "\n" } return result }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
vendor/github.com/google/gnostic-models/extensions/extension.pb.go
// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.1 // protoc v3.19.3 // source: extensions/extension.proto package gnostic_extension_v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // The version number of Gnostic. type Version struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. 
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` } func (x *Version) Reset() { *x = Version{} if protoimpl.UnsafeEnabled { mi := &file_extensions_extension_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Version) String() string { return protoimpl.X.MessageStringOf(x) } func (*Version) ProtoMessage() {} func (x *Version) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Version.ProtoReflect.Descriptor instead. func (*Version) Descriptor() ([]byte, []int) { return file_extensions_extension_proto_rawDescGZIP(), []int{0} } func (x *Version) GetMajor() int32 { if x != nil { return x.Major } return 0 } func (x *Version) GetMinor() int32 { if x != nil { return x.Minor } return 0 } func (x *Version) GetPatch() int32 { if x != nil { return x.Patch } return 0 } func (x *Version) GetSuffix() string { if x != nil { return x.Suffix } return "" } // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The extension to process. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` // The version number of Gnostic. 
CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` } func (x *ExtensionHandlerRequest) Reset() { *x = ExtensionHandlerRequest{} if protoimpl.UnsafeEnabled { mi := &file_extensions_extension_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ExtensionHandlerRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ExtensionHandlerRequest) ProtoMessage() {} func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return file_extensions_extension_proto_rawDescGZIP(), []int{1} } func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { if x != nil { return x.Wrapper } return nil } func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { if x != nil { return x.CompilerVersion } return nil } // The extensions writes an encoded ExtensionHandlerResponse to stdout. type ExtensionHandlerResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // true if the extension is handled by the extension handler; false otherwise Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` // Error message(s). If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. // // This should be used to indicate errors which prevent the extension from // operating as intended. 
Errors which indicate a problem in gnostic // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` // text output Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } func (x *ExtensionHandlerResponse) Reset() { *x = ExtensionHandlerResponse{} if protoimpl.UnsafeEnabled { mi := &file_extensions_extension_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ExtensionHandlerResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ExtensionHandlerResponse) ProtoMessage() {} func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return file_extensions_extension_proto_rawDescGZIP(), []int{2} } func (x *ExtensionHandlerResponse) GetHandled() bool { if x != nil { return x.Handled } return false } func (x *ExtensionHandlerResponse) GetErrors() []string { if x != nil { return x.Errors } return nil } func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { if x != nil { return x.Value } return nil } type Wrapper struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // version of the OpenAPI specification in which this extension was written. Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Name of the extension. 
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` // YAML-formatted extension value. Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` } func (x *Wrapper) Reset() { *x = Wrapper{} if protoimpl.UnsafeEnabled { mi := &file_extensions_extension_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Wrapper) String() string { return protoimpl.X.MessageStringOf(x) } func (*Wrapper) ProtoMessage() {} func (x *Wrapper) ProtoReflect() protoreflect.Message { mi := &file_extensions_extension_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. func (*Wrapper) Descriptor() ([]byte, []int) { return file_extensions_extension_proto_rawDescGZIP(), []int{3} } func (x *Wrapper) GetVersion() string { if x != nil { return x.Version } return "" } func (x *Wrapper) GetExtensionName() string { if x != nil { return x.ExtensionName } return "" } func (x *Wrapper) GetYaml() string { if x != nil { return x.Yaml } return "" } var File_extensions_extension_proto protoreflect.FileDescriptor var file_extensions_extension_proto_rawDesc = []byte{ 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_extensions_extension_proto_rawDescOnce sync.Once file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc ) func file_extensions_extension_proto_rawDescGZIP() []byte { file_extensions_extension_proto_rawDescOnce.Do(func() { file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) }) return file_extensions_extension_proto_rawDescData } var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_extensions_extension_proto_goTypes = []interface{}{ (*Version)(nil), // 0: gnostic.extension.v1.Version (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest 
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse (*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper (*anypb.Any)(nil), // 4: google.protobuf.Any } var file_extensions_extension_proto_depIdxs = []int32{ 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any 3, // [3:3] is the sub-list for method output_type 3, // [3:3] is the sub-list for method input_type 3, // [3:3] is the sub-list for extension type_name 3, // [3:3] is the sub-list for extension extendee 0, // [0:3] is the sub-list for field type_name } func init() { file_extensions_extension_proto_init() } func file_extensions_extension_proto_init() { if File_extensions_extension_proto != nil { return } if !protoimpl.UnsafeEnabled { file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Version); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionHandlerRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionHandlerResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Wrapper); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := 
protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_extensions_extension_proto_rawDesc, NumEnums: 0, NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_extensions_extension_proto_goTypes, DependencyIndexes: file_extensions_extension_proto_depIdxs, MessageInfos: file_extensions_extension_proto_msgTypes, }.Build() File_extensions_extension_proto = out.File file_extensions_extension_proto_rawDesc = nil file_extensions_extension_proto_goTypes = nil file_extensions_extension_proto_depIdxs = nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/extensions/extensions.go
vendor/github.com/google/gnostic-models/extensions/extensions.go
// Copyright 2017 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package gnostic_extension_v1 import ( "io/ioutil" "log" "os" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" ) type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) // Main implements the main program of an extension handler. func Main(handler extensionHandler) { // unpack the request data, err := ioutil.ReadAll(os.Stdin) if err != nil { log.Println("File error:", err.Error()) os.Exit(1) } if len(data) == 0 { log.Println("No input data.") os.Exit(1) } request := &ExtensionHandlerRequest{} err = proto.Unmarshal(data, request) if err != nil { log.Println("Input error:", err.Error()) os.Exit(1) } // call the handler handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) // respond with the output of the handler response := &ExtensionHandlerResponse{ Handled: false, // default assumption Errors: make([]string, 0), } if err != nil { response.Errors = append(response.Errors, err.Error()) } else if handled { response.Handled = true response.Value, err = ptypes.MarshalAny(output) if err != nil { response.Errors = append(response.Errors, err.Error()) } } responseBytes, _ := proto.Marshal(response) os.Stdout.Write(responseBytes) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/openapiv2/document.go
vendor/github.com/google/gnostic-models/openapiv2/document.go
// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openapi_v2 import ( "gopkg.in/yaml.v3" "github.com/google/gnostic-models/compiler" ) // ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. func ParseDocument(b []byte) (*Document, error) { info, err := compiler.ReadInfoFromBytes("", b) if err != nil { return nil, err } root := info.Content[0] return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil)) } // YAMLValue produces a serialized YAML representation of the document. func (d *Document) YAMLValue(comment string) ([]byte, error) { rawInfo := d.ToRawInfo() rawInfo = &yaml.Node{ Kind: yaml.DocumentNode, Content: []*yaml.Node{rawInfo}, HeadComment: comment, } return yaml.Marshal(rawInfo) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // THIS FILE IS AUTOMATICALLY GENERATED. package openapi_v2 import ( "fmt" "regexp" "strings" "gopkg.in/yaml.v3" "github.com/google/gnostic-models/compiler" ) // Version returns the package name (and OpenAPI version). func Version() string { return "openapi_v2" } // NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { errors := make([]error, 0) x := &AdditionalPropertiesItem{} matched := false // Schema schema = 1; { m, ok := compiler.UnpackMap(in) if ok { // errors might be ok here, they mean we just don't have the right subtype t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) if matchingError == nil { x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} matched = true } else { errors = append(errors, matchingError) } } } // bool boolean = 2; boolValue, ok := compiler.BoolForScalarNode(in) if ok { x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} matched = true } if matched { // since the oneof matched one of its possibilities, discard any matching errors errors = make([]error, 0) } else { message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") err := compiler.NewError(context, message) errors = []error{err} } return x, 
compiler.NewErrorGroupOrNil(errors) } // NewAny creates an object of type Any if possible, returning an error if not. func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { errors := make([]error, 0) x := &Any{} bytes := compiler.Marshal(in) x.Yaml = string(bytes) return x, compiler.NewErrorGroupOrNil(errors) } // NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { errors := make([]error, 0) x := &ApiKeySecurity{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"in", "name", "type"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"description", "in", "name", "type"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { x.Type, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [apiKey] if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, 
message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { x.Name, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { x.In, ok = compiler.StringForScalarNode(v3) if !ok { message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [header query] if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 4; v4 := compiler.MapValueForKey(m, "description") if v4 != nil { x.Description, ok = compiler.StringForScalarNode(v4) if !ok { message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 5; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewBasicAuthenticationSecurity creates an object of type 
BasicAuthenticationSecurity if possible, returning an error if not. func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { errors := make([]error, 0) x := &BasicAuthenticationSecurity{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"type"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"description", "type"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string type = 1; v1 := compiler.MapValueForKey(m, "type") if v1 != nil { x.Type, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [basic] if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 2; v2 := compiler.MapValueForKey(m, "description") if v2 != nil { x.Description, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension 
= 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { errors := make([]error, 0) x := &BodyParameter{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"in", "name", "schema"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"description", "in", "name", "required", "schema"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { x.Description, ok 
= compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string name = 2; v2 := compiler.MapValueForKey(m, "name") if v2 != nil { x.Name, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string in = 3; v3 := compiler.MapValueForKey(m, "in") if v3 != nil { x.In, ok = compiler.StringForScalarNode(v3) if !ok { message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [body] if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // bool required = 4; v4 := compiler.MapValueForKey(m, "required") if v4 != nil { x.Required, ok = compiler.BoolForScalarNode(v4) if !ok { message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // Schema schema = 5; v5 := compiler.MapValueForKey(m, "schema") if v5 != nil { var err error x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) if err != nil { errors = append(errors, err) } } // repeated NamedAny vendor_extension = 6; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = 
string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewContact creates an object of type Contact if possible, returning an error if not. func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { errors := make([]error, 0) x := &Contact{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { allowedKeys := []string{"email", "name", "url"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string name = 1; v1 := compiler.MapValueForKey(m, "name") if v1 != nil { x.Name, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { x.Url, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string email = 3; v3 := compiler.MapValueForKey(m, "email") if v3 != nil { x.Email, ok = compiler.StringForScalarNode(v3) if !ok { message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 4; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for 
i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewDefault creates an object of type Default if possible, returning an error if not. func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { errors := make([]error, 0) x := &Default{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.AdditionalProperties = append(x.AdditionalProperties, pair) } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewDefinitions creates an object of type Definitions if possible, returning an error if not. 
func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { errors := make([]error, 0) x := &Definitions{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { // repeated NamedSchema additional_properties = 1; // MAP: Schema x.AdditionalProperties = make([]*NamedSchema, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] pair := &NamedSchema{} pair.Name = k var err error pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } x.AdditionalProperties = append(x.AdditionalProperties, pair) } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewDocument creates an object of type Document if possible, returning an error if not. func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { errors := make([]error, 0) x := &Document{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"info", "paths", "swagger"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), 
strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string swagger = 1; v1 := compiler.MapValueForKey(m, "swagger") if v1 != nil { x.Swagger, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [2.0] if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // Info info = 2; v2 := compiler.MapValueForKey(m, "info") if v2 != nil { var err error x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) if err != nil { errors = append(errors, err) } } // string host = 3; v3 := compiler.MapValueForKey(m, "host") if v3 != nil { x.Host, ok = compiler.StringForScalarNode(v3) if !ok { message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // string base_path = 4; v4 := compiler.MapValueForKey(m, "basePath") if v4 != nil { x.BasePath, ok = compiler.StringForScalarNode(v4) if !ok { message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string schemes = 5; v5 := compiler.MapValueForKey(m, "schemes") if v5 != nil { v, ok := compiler.SequenceNodeForNode(v5) if ok { x.Schemes = compiler.StringArrayForSequenceNode(v) } else { message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } // check for valid enum values // [http https ws wss] if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { message := fmt.Sprintf("has unexpected value for schemes: %s", 
compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string consumes = 6; v6 := compiler.MapValueForKey(m, "consumes") if v6 != nil { v, ok := compiler.SequenceNodeForNode(v6) if ok { x.Consumes = compiler.StringArrayForSequenceNode(v) } else { message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) errors = append(errors, compiler.NewError(context, message)) } } // repeated string produces = 7; v7 := compiler.MapValueForKey(m, "produces") if v7 != nil { v, ok := compiler.SequenceNodeForNode(v7) if ok { x.Produces = compiler.StringArrayForSequenceNode(v) } else { message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) errors = append(errors, compiler.NewError(context, message)) } } // Paths paths = 8; v8 := compiler.MapValueForKey(m, "paths") if v8 != nil { var err error x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) if err != nil { errors = append(errors, err) } } // Definitions definitions = 9; v9 := compiler.MapValueForKey(m, "definitions") if v9 != nil { var err error x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) if err != nil { errors = append(errors, err) } } // ParameterDefinitions parameters = 10; v10 := compiler.MapValueForKey(m, "parameters") if v10 != nil { var err error x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) if err != nil { errors = append(errors, err) } } // ResponseDefinitions responses = 11; v11 := compiler.MapValueForKey(m, "responses") if v11 != nil { var err error x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) if err != nil { errors = append(errors, err) } } // repeated SecurityRequirement security = 12; v12 := compiler.MapValueForKey(m, "security") if v12 != nil { // repeated SecurityRequirement x.Security = make([]*SecurityRequirement, 0) a, ok := 
compiler.SequenceNodeForNode(v12) if ok { for _, item := range a.Content { y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) if err != nil { errors = append(errors, err) } x.Security = append(x.Security, y) } } } // SecurityDefinitions security_definitions = 13; v13 := compiler.MapValueForKey(m, "securityDefinitions") if v13 != nil { var err error x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) if err != nil { errors = append(errors, err) } } // repeated Tag tags = 14; v14 := compiler.MapValueForKey(m, "tags") if v14 != nil { // repeated Tag x.Tags = make([]*Tag, 0) a, ok := compiler.SequenceNodeForNode(v14) if ok { for _, item := range a.Content { y, err := NewTag(item, compiler.NewContext("tags", item, context)) if err != nil { errors = append(errors, err) } x.Tags = append(x.Tags, y) } } } // ExternalDocs external_docs = 15; v15 := compiler.MapValueForKey(m, "externalDocs") if v15 != nil { var err error x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) if err != nil { errors = append(errors, err) } } // repeated NamedAny vendor_extension = 16; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewExamples creates an object of type Examples if 
possible, returning an error if not. func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { errors := make([]error, 0) x := &Examples{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { // repeated NamedAny additional_properties = 1; // MAP: Any x.AdditionalProperties = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.AdditionalProperties = append(x.AdditionalProperties, pair) } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. 
func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { errors := make([]error, 0) x := &ExternalDocs{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"url"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"description", "url"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string description = 1; v1 := compiler.MapValueForKey(m, "description") if v1 != nil { x.Description, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string url = 2; v2 := compiler.MapValueForKey(m, "url") if v2 != nil { x.Url, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // repeated NamedAny vendor_extension = 3; // MAP: Any ^x- x.VendorExtension = make([]*NamedAny, 0) for i := 0; i < len(m.Content); i += 2 { k, ok := compiler.StringForScalarNode(m.Content[i]) if ok { v := m.Content[i+1] if strings.HasPrefix(k, "x-") { pair := &NamedAny{} pair.Name = k result := &Any{} handled, resultFromExt, err := compiler.CallExtension(context, v, k) if handled { if err != nil { errors = 
append(errors, err) } else { bytes := compiler.Marshal(v) result.Yaml = string(bytes) result.Value = resultFromExt pair.Value = result } } else { pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) if err != nil { errors = append(errors, err) } } x.VendorExtension = append(x.VendorExtension, pair) } } } } return x, compiler.NewErrorGroupOrNil(errors) } // NewFileSchema creates an object of type FileSchema if possible, returning an error if not. func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { errors := make([]error, 0) x := &FileSchema{} m, ok := compiler.UnpackMap(in) if !ok { message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) errors = append(errors, compiler.NewError(context, message)) } else { requiredKeys := []string{"type"} missingKeys := compiler.MissingKeysInMap(m, requiredKeys) if len(missingKeys) > 0 { message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} allowedPatterns := []*regexp.Regexp{pattern0} invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) if len(invalidKeys) > 0 { message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) errors = append(errors, compiler.NewError(context, message)) } // string format = 1; v1 := compiler.MapValueForKey(m, "format") if v1 != nil { x.Format, ok = compiler.StringForScalarNode(v1) if !ok { message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) errors = append(errors, compiler.NewError(context, message)) } } // string title = 2; v2 := compiler.MapValueForKey(m, "title") if v2 != nil { x.Title, ok = compiler.StringForScalarNode(v2) if !ok { message := fmt.Sprintf("has unexpected 
value for title: %s", compiler.Display(v2)) errors = append(errors, compiler.NewError(context, message)) } } // string description = 3; v3 := compiler.MapValueForKey(m, "description") if v3 != nil { x.Description, ok = compiler.StringForScalarNode(v3) if !ok { message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) errors = append(errors, compiler.NewError(context, message)) } } // Any default = 4; v4 := compiler.MapValueForKey(m, "default") if v4 != nil { var err error x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) if err != nil { errors = append(errors, err) } } // repeated string required = 5; v5 := compiler.MapValueForKey(m, "required") if v5 != nil { v, ok := compiler.SequenceNodeForNode(v5) if ok { x.Required = compiler.StringArrayForSequenceNode(v) } else { message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) errors = append(errors, compiler.NewError(context, message)) } } // string type = 6; v6 := compiler.MapValueForKey(m, "type") if v6 != nil {
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
// Copyright 2020 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // THIS FILE IS AUTOMATICALLY GENERATED. // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.27.1 // protoc v3.19.3 // source: openapiv2/OpenAPIv2.proto package openapi_v2 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. 
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) type AdditionalPropertiesItem struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Oneof: // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` } func (x *AdditionalPropertiesItem) Reset() { *x = AdditionalPropertiesItem{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *AdditionalPropertiesItem) String() string { return protoimpl.X.MessageStringOf(x) } func (*AdditionalPropertiesItem) ProtoMessage() {} func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. 
func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} } func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { if m != nil { return m.Oneof } return nil } func (x *AdditionalPropertiesItem) GetSchema() *Schema { if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { return x.Schema } return nil } func (x *AdditionalPropertiesItem) GetBoolean() bool { if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { return x.Boolean } return false } type isAdditionalPropertiesItem_Oneof interface { isAdditionalPropertiesItem_Oneof() } type AdditionalPropertiesItem_Schema struct { Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } type AdditionalPropertiesItem_Boolean struct { Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` } func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} type Any struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` } func (x *Any) Reset() { *x = Any{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Any) String() string { return protoimpl.X.MessageStringOf(x) } func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Any.ProtoReflect.Descriptor instead. 
func (*Any) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} } func (x *Any) GetValue() *anypb.Any { if x != nil { return x.Value } return nil } func (x *Any) GetYaml() string { if x != nil { return x.Yaml } return "" } type ApiKeySecurity struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } func (x *ApiKeySecurity) Reset() { *x = ApiKeySecurity{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ApiKeySecurity) String() string { return protoimpl.X.MessageStringOf(x) } func (*ApiKeySecurity) ProtoMessage() {} func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. 
func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} } func (x *ApiKeySecurity) GetType() string { if x != nil { return x.Type } return "" } func (x *ApiKeySecurity) GetName() string { if x != nil { return x.Name } return "" } func (x *ApiKeySecurity) GetIn() string { if x != nil { return x.In } return "" } func (x *ApiKeySecurity) GetDescription() string { if x != nil { return x.Description } return "" } func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { if x != nil { return x.VendorExtension } return nil } type BasicAuthenticationSecurity struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } func (x *BasicAuthenticationSecurity) Reset() { *x = BasicAuthenticationSecurity{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BasicAuthenticationSecurity) String() string { return protoimpl.X.MessageStringOf(x) } func (*BasicAuthenticationSecurity) ProtoMessage() {} func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. 
func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} } func (x *BasicAuthenticationSecurity) GetType() string { if x != nil { return x.Type } return "" } func (x *BasicAuthenticationSecurity) GetDescription() string { if x != nil { return x.Description } return "" } func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { if x != nil { return x.VendorExtension } return nil } type BodyParameter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Determines the location of the parameter. In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` // Determines whether or not this parameter is required or optional. 
Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } func (x *BodyParameter) Reset() { *x = BodyParameter{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *BodyParameter) String() string { return protoimpl.X.MessageStringOf(x) } func (*BodyParameter) ProtoMessage() {} func (x *BodyParameter) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. func (*BodyParameter) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} } func (x *BodyParameter) GetDescription() string { if x != nil { return x.Description } return "" } func (x *BodyParameter) GetName() string { if x != nil { return x.Name } return "" } func (x *BodyParameter) GetIn() string { if x != nil { return x.In } return "" } func (x *BodyParameter) GetRequired() bool { if x != nil { return x.Required } return false } func (x *BodyParameter) GetSchema() *Schema { if x != nil { return x.Schema } return nil } func (x *BodyParameter) GetVendorExtension() []*NamedAny { if x != nil { return x.VendorExtension } return nil } // Contact information for the owners of the API. type Contact struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The identifying name of the contact person/organization. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the contact information. Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` // The email address of the contact person/organization. Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` } func (x *Contact) Reset() { *x = Contact{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Contact) String() string { return protoimpl.X.MessageStringOf(x) } func (*Contact) ProtoMessage() {} func (x *Contact) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Contact.ProtoReflect.Descriptor instead. 
func (*Contact) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} } func (x *Contact) GetName() string { if x != nil { return x.Name } return "" } func (x *Contact) GetUrl() string { if x != nil { return x.Url } return "" } func (x *Contact) GetEmail() string { if x != nil { return x.Email } return "" } func (x *Contact) GetVendorExtension() []*NamedAny { if x != nil { return x.VendorExtension } return nil } type Default struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` } func (x *Default) Reset() { *x = Default{} if protoimpl.UnsafeEnabled { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Default) String() string { return protoimpl.X.MessageStringOf(x) } func (*Default) ProtoMessage() {} func (x *Default) ProtoReflect() protoreflect.Message { mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Default.ProtoReflect.Descriptor instead. func (*Default) Descriptor() ([]byte, []int) { return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} } func (x *Default) GetAdditionalProperties() []*NamedAny { if x != nil { return x.AdditionalProperties } return nil } // One or more JSON objects describing the schemas being consumed and produced by the API. 
// NOTE(review): everything below is protoc-gen-go generated code for the
// OpenAPI v2 (Swagger) protobuf model — do not edit by hand; regenerate from
// openapiv2/OpenAPIv2.proto instead. Each message follows the standard
// generated shape: a struct with protoimpl bookkeeping fields, lazy message-info
// registration in Reset/ProtoReflect, and nil-safe Get* accessors.

// Definitions is a map-like message: each NamedSchema pairs a definition name
// with its Schema (protobuf has no map<string, Message> here by design).
type Definitions struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
}

func (x *Definitions) Reset() {
	*x = Definitions{}
	if protoimpl.UnsafeEnabled {
		// Re-attach the cached message info (message index 7) after zeroing.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Definitions) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Definitions) ProtoMessage() {}

func (x *Definitions) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			// Lazily initialize the message info on first reflective access.
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Definitions.ProtoReflect.Descriptor instead.
func (*Definitions) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7}
}

// GetAdditionalProperties returns the schema definitions; nil-receiver safe.
func (x *Definitions) GetAdditionalProperties() []*NamedSchema {
	if x != nil {
		return x.AdditionalProperties
	}
	return nil
}

// Document is the root message of an OpenAPI v2 (Swagger 2.0) description.
type Document struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The Swagger version of this document.
	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
	Info    *Info  `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
	// The host (name or ip) of the API. Example: 'swagger.io'
	Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
	// The base path to the API. Example: '/api'.
	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
	// The transfer protocol of the API.
	Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"`
	// A list of MIME types accepted by the API.
	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
	// A list of MIME types the API can produce.
	Produces            []string               `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
	Paths               *Paths                 `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"`
	Definitions         *Definitions           `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"`
	Parameters          *ParameterDefinitions  `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"`
	Responses           *ResponseDefinitions   `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"`
	Security            []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
	SecurityDefinitions *SecurityDefinitions   `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
	Tags                []*Tag                 `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"`
	ExternalDocs        *ExternalDocs          `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
	VendorExtension     []*NamedAny            `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}

func (x *Document) Reset() {
	*x = Document{}
	if protoimpl.UnsafeEnabled {
		// Message index 8 in this file's generated type table.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Document) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Document) ProtoMessage() {}

func (x *Document) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Document.ProtoReflect.Descriptor instead.
func (*Document) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8}
}

// The Get* accessors below all follow the generated nil-receiver-safe pattern:
// they return the zero value when called on a nil *Document.

func (x *Document) GetSwagger() string {
	if x != nil {
		return x.Swagger
	}
	return ""
}

func (x *Document) GetInfo() *Info {
	if x != nil {
		return x.Info
	}
	return nil
}

func (x *Document) GetHost() string {
	if x != nil {
		return x.Host
	}
	return ""
}

func (x *Document) GetBasePath() string {
	if x != nil {
		return x.BasePath
	}
	return ""
}

func (x *Document) GetSchemes() []string {
	if x != nil {
		return x.Schemes
	}
	return nil
}

func (x *Document) GetConsumes() []string {
	if x != nil {
		return x.Consumes
	}
	return nil
}

func (x *Document) GetProduces() []string {
	if x != nil {
		return x.Produces
	}
	return nil
}

func (x *Document) GetPaths() *Paths {
	if x != nil {
		return x.Paths
	}
	return nil
}

func (x *Document) GetDefinitions() *Definitions {
	if x != nil {
		return x.Definitions
	}
	return nil
}

func (x *Document) GetParameters() *ParameterDefinitions {
	if x != nil {
		return x.Parameters
	}
	return nil
}

func (x *Document) GetResponses() *ResponseDefinitions {
	if x != nil {
		return x.Responses
	}
	return nil
}

func (x *Document) GetSecurity() []*SecurityRequirement {
	if x != nil {
		return x.Security
	}
	return nil
}

func (x *Document) GetSecurityDefinitions() *SecurityDefinitions {
	if x != nil {
		return x.SecurityDefinitions
	}
	return nil
}

func (x *Document) GetTags() []*Tag {
	if x != nil {
		return x.Tags
	}
	return nil
}

func (x *Document) GetExternalDocs() *ExternalDocs {
	if x != nil {
		return x.ExternalDocs
	}
	return nil
}

func (x *Document) GetVendorExtension() []*NamedAny {
	if x != nil {
		return x.VendorExtension
	}
	return nil
}

// Examples is a name→value map of example payloads (NamedAny pairs).
type Examples struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
}

func (x *Examples) Reset() {
	*x = Examples{}
	if protoimpl.UnsafeEnabled {
		// Message index 9 in this file's generated type table.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Examples) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Examples) ProtoMessage() {}

func (x *Examples) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Examples.ProtoReflect.Descriptor instead.
func (*Examples) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9}
}

func (x *Examples) GetAdditionalProperties() []*NamedAny {
	if x != nil {
		return x.AdditionalProperties
	}
	return nil
}

// information about external documentation
type ExternalDocs struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Description     string      `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
	Url             string      `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}

func (x *ExternalDocs) Reset() {
	*x = ExternalDocs{}
	if protoimpl.UnsafeEnabled {
		// Message index 10 in this file's generated type table.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ExternalDocs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ExternalDocs) ProtoMessage() {}

func (x *ExternalDocs) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead.
func (*ExternalDocs) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10}
}

func (x *ExternalDocs) GetDescription() string {
	if x != nil {
		return x.Description
	}
	return ""
}

func (x *ExternalDocs) GetUrl() string {
	if x != nil {
		return x.Url
	}
	return ""
}

func (x *ExternalDocs) GetVendorExtension() []*NamedAny {
	if x != nil {
		return x.VendorExtension
	}
	return nil
}

// A deterministic version of a JSON Schema object.
type FileSchema struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Format          string        `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
	Title           string        `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
	Description     string        `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	Default         *Any          `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
	Required        []string      `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
	Type            string        `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
	ReadOnly        bool          `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
	ExternalDocs    *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
	Example         *Any          `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
	VendorExtension []*NamedAny   `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}

func (x *FileSchema) Reset() {
	*x = FileSchema{}
	if protoimpl.UnsafeEnabled {
		// Message index 11 in this file's generated type table.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FileSchema) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FileSchema) ProtoMessage() {}

func (x *FileSchema) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead.
func (*FileSchema) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11}
}

func (x *FileSchema) GetFormat() string {
	if x != nil {
		return x.Format
	}
	return ""
}

func (x *FileSchema) GetTitle() string {
	if x != nil {
		return x.Title
	}
	return ""
}

func (x *FileSchema) GetDescription() string {
	if x != nil {
		return x.Description
	}
	return ""
}

func (x *FileSchema) GetDefault() *Any {
	if x != nil {
		return x.Default
	}
	return nil
}

func (x *FileSchema) GetRequired() []string {
	if x != nil {
		return x.Required
	}
	return nil
}

func (x *FileSchema) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

func (x *FileSchema) GetReadOnly() bool {
	if x != nil {
		return x.ReadOnly
	}
	return false
}

func (x *FileSchema) GetExternalDocs() *ExternalDocs {
	if x != nil {
		return x.ExternalDocs
	}
	return nil
}

func (x *FileSchema) GetExample() *Any {
	if x != nil {
		return x.Example
	}
	return nil
}

func (x *FileSchema) GetVendorExtension() []*NamedAny {
	if x != nil {
		return x.VendorExtension
	}
	return nil
}

// FormDataParameterSubSchema describes a parameter submitted as form data
// (in: "formData"), including its JSON-Schema-style validation constraints.
type FormDataParameterSubSchema struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Determines whether or not this parameter is required or optional.
	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
	// Determines the location of the parameter.
	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	// The name of the parameter.
	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
	// allows sending a parameter by name only or with an empty value.
	AllowEmptyValue  bool             `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
	Type             string           `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
	Format           string           `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
	Items            *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
	CollectionFormat string           `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
	Default          *Any             `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
	Maximum          float64          `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
	ExclusiveMaximum bool             `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
	Minimum          float64          `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
	ExclusiveMinimum bool             `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
	MaxLength        int64            `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
	MinLength        int64            `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
	Pattern          string           `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
	MaxItems         int64            `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
	MinItems         int64            `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
	UniqueItems      bool             `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
	Enum             []*Any           `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
	MultipleOf       float64          `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
	VendorExtension  []*NamedAny      `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
}

func (x *FormDataParameterSubSchema) Reset() {
	*x = FormDataParameterSubSchema{}
	if protoimpl.UnsafeEnabled {
		// Message index 12 in this file's generated type table.
		mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FormDataParameterSubSchema) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FormDataParameterSubSchema) ProtoMessage() {}

func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message {
	mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead.
func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) {
	return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12}
}

func (x *FormDataParameterSubSchema) GetRequired() bool {
	if x != nil {
		return x.Required
	}
	return false
}

func (x *FormDataParameterSubSchema) GetIn() string {
	if x != nil {
		return x.In
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetDescription() string {
	if x != nil {
		return x.Description
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
	if x != nil {
		return x.AllowEmptyValue
	}
	return false
}

func (x *FormDataParameterSubSchema) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetFormat() string {
	if x != nil {
		return x.Format
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
	if x != nil {
		return x.Items
	}
	return nil
}

func (x *FormDataParameterSubSchema) GetCollectionFormat() string {
	if x != nil {
		return x.CollectionFormat
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetDefault() *Any {
	if x != nil {
		return x.Default
	}
	return nil
}

func (x *FormDataParameterSubSchema) GetMaximum() float64 {
	if x != nil {
		return x.Maximum
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
	if x != nil {
		return x.ExclusiveMaximum
	}
	return false
}

func (x *FormDataParameterSubSchema) GetMinimum() float64 {
	if x != nil {
		return x.Minimum
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
	if x != nil {
		return x.ExclusiveMinimum
	}
	return false
}

func (x *FormDataParameterSubSchema) GetMaxLength() int64 {
	if x != nil {
		return x.MaxLength
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetMinLength() int64 {
	if x != nil {
		return x.MinLength
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetPattern() string {
	if x != nil {
		return x.Pattern
	}
	return ""
}

func (x *FormDataParameterSubSchema) GetMaxItems() int64 {
	if x != nil {
		return x.MaxItems
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetMinItems() int64 {
	if x != nil {
		return x.MinItems
	}
	return 0
}

func (x *FormDataParameterSubSchema) GetUniqueItems() bool {
	if x != nil {
		return x.UniqueItems
	}
	return false
}

func (x *FormDataParameterSubSchema) GetEnum() []*Any {
	if x != nil {
		return x.Enum
	}
	return nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true