Dataset schema (per-row columns, with the viewer's length/class stats):

column     dtype         stats
---------  ------------  ------------------
file_name  large_string  lengths 4 to 140
prefix     large_string  lengths 0 to 39k
suffix     large_string  lengths 0 to 36.1k
middle     large_string  lengths 0 to 29.4k
fim_type   large_string  4 values
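The rows below are shown with their fields labeled in this column order. To make the schema concrete, here is a minimal sketch of iterating rows with these five columns and checking that each row's spans reassemble the original file. It assumes the rows are exported as JSON lines and loaded with the Hugging Face datasets library; the file name is hypothetical.

from datasets import load_dataset

ds = load_dataset("json", data_files="fim_rows.jsonl", split="train")  # hypothetical export

for row in ds:
    # By construction, prefix + middle + suffix is the complete source file.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["file_name"], row["fim_type"], len(source))
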
file_name: binder.go
prefix:

package config

import (
    "errors"
    "fmt"
    "go/token"
    "go/types"
    "strings"

    "golang.org/x/tools/go/packages"

    "github.com/99designs/gqlgen/internal/code"
    "github.com/vektah/gqlparser/v2/ast"
)

var ErrTypeNotFound = errors.New("unable to find type")

// Binder connects graphql types to golang types using static analysis
type Binder struct {
    pkgs        *code.Packages
    schema      *ast.Schema
    cfg         *Config
    tctx        *types.Context
    References  []*TypeReference
    SawInvalid  bool
    objectCache map[string]map[string]types.Object
}

func (c *Config) NewBinder() *Binder {
    return &Binder{
        pkgs:   c.Packages,
        schema: c.Schema,
        cfg:    c,
    }
}

func (b *Binder) TypePosition(typ types.Type) token.Position {
    named, isNamed := typ.(*types.Named)
    if !isNamed {
        return token.Position{
            Filename: "unknown",
        }
    }
    return b.ObjectPosition(named.Obj())
}

func (b *Binder) ObjectPosition(typ types.Object) token.Position {
    if typ == nil {
        return token.Position{
            Filename: "unknown",
        }
    }
    pkg := b.pkgs.Load(typ.Pkg().Path())
    return pkg.Fset.Position(typ.Pos())
}

func (b *Binder) FindTypeFromName(name string) (types.Type, error) {
    pkgName, typeName := code.PkgAndType(name)
    return b.FindType(pkgName, typeName)
}

func (b *Binder) FindType(pkgName string, typeName string) (types.Type, error) {
    if pkgName == "" {
        if typeName == "map[string]interface{}" {
            return MapType, nil
        }
        if typeName == "interface{}" {
            return InterfaceType, nil
        }
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    if fun, isFunc := obj.(*types.Func); isFunc {
        return fun.Type().(*types.Signature).Params().At(0).Type(), nil
    }
    return obj.Type(), nil
}

func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) {
    if b.tctx == nil {
        b.tctx = types.NewContext()
    }
    return types.Instantiate(b.tctx, orig, targs, false)
}

var (
    MapType       = types.NewMap(types.Typ[types.String], types.NewInterfaceType(nil, nil).Complete())
    InterfaceType = types.NewInterfaceType(nil, nil)
)

func (b *Binder) DefaultUserObject(name string) (types.Type, error) {
    models := b.cfg.Models[name].Model
    if len(models) == 0 {
        return nil, fmt.Errorf(name + " not found in typemap")
    }
    if models[0] == "map[string]interface{}" {
        return MapType, nil
    }
    if models[0] == "interface{}" {
        return InterfaceType, nil
    }
    pkgName, typeName := code.PkgAndType(models[0])
    if pkgName == "" {
        return nil, fmt.Errorf("missing package name for %s", name)
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    return obj.Type(), nil
}

func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
    if pkgName == "" {
        return nil, fmt.Errorf("package cannot be nil")
    }
    pkg := b.pkgs.LoadWithTypes(pkgName)
    if pkg == nil {
        err := b.pkgs.Errors()
        if err != nil {
            return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
        }
        return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
    }
    if b.objectCache == nil {
        b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
    }
    defsIndex, ok := b.objectCache[pkgName]
    if !ok {
        defsIndex = indexDefs(pkg)
        b.objectCache[pkgName] = defsIndex
    }
    // function based marshalers take precedence
    if val, ok := defsIndex["Marshal"+typeName]; ok {
        return val, nil
    }
    if val, ok := defsIndex[typeName]; ok {
        return val, nil
    }
    return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}

func indexDefs(pkg *packages.Package) map[string]types.Object {
    res := make(map[string]types.Object)
    scope := pkg.Types.Scope()
    for astNode, def := range pkg.TypesInfo.Defs {
        // only look at defs in the top scope
        if def == nil {
            continue
        }
        parent := def.Parent()
        if parent == nil || parent != scope {
            continue
        }
        if _, ok := res[astNode.Name]; !ok {
            // The above check may not be really needed, it is only here to have a consistent behavior with
            // previous implementation of FindObject() function which only honored the first inclusion of a def.
            // If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
            res[astNode.Name] = def
        }
    }
    return res
}

func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
    newRef := *ref
    newRef.GO = types.NewPointer(ref.GO)
    b.References = append(b.References, &newRef)
    return &newRef
}

// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
    Definition  *ast.Definition
    GQL         *ast.Type
    GO          types.Type  // Type of the field being bound. Could be a pointer or a value type of Target.
    Target      types.Type  // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
    CastType    types.Type  // Before calling marshalling functions cast from/to this base type
    Marshaler   *types.Func // When using external marshalling functions this will point to the Marshal function
    Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
    IsMarshaler bool        // Does the type implement graphql.Marshaler and graphql.Unmarshaler
    IsOmittable bool        // Is the type wrapped with Omittable
    IsContext   bool        // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
    PointersInUmarshalInput bool // Inverse values and pointers in return.
}

func (ref *TypeReference) Elem() *TypeReference {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        newRef := *ref
        newRef.GO = p.Elem()
        return &newRef
    }
    if ref.IsSlice() {
        newRef := *ref
        newRef.GO = ref.GO.(*types.Slice).Elem()
        newRef.GQL = ref.GQL.Elem
        return &newRef
    }
    return nil
}

func (ref *TypeReference) IsPtr() bool {
    _, isPtr := ref.GO.(*types.Pointer)
    return isPtr
}

// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        _, isPtr := p.Elem().(*types.Pointer)
        return isPtr
    }
    return false
}

func (ref *TypeReference) IsNilable() bool {
    return IsNilable(ref.GO)
}

func (ref *TypeReference) IsSlice() bool {
    _, isSlice := ref.GO.(*types.Slice)
    return ref.GQL.Elem != nil && isSlice
}

func (ref *TypeReference) IsPtrToSlice() bool {
    if ref.IsPtr() {
        _, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
        return isPointerToSlice
    }
    return false
}

func (ref *TypeReference) IsPtrToIntf() bool {
    if ref.IsPtr() {
        _, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
        return isPointerToInterface
    }
    return false
}

func (ref *TypeReference) IsNamed() bool {
    _, isSlice := ref.GO.(*types.Named)
    return isSlice
}

func (ref *TypeReference) IsStruct() bool {
    _, isStruct := ref.GO.Underlying().(*types.Struct)
    return isStruct
}

func (ref *TypeReference) IsScalar() bool {
    return ref.Definition.Kind == ast.Scalar
}

func (ref *TypeReference) UniquenessKey() string {
    nullability := "O"
    if ref.GQL.NonNull {
        nullability = "N"
    }
    elemNullability := ""
    if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
        // Fix for #896
        elemNullability = "ᚄ"
    }
    return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}

func (ref *TypeReference) MarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if ref.Definition.Kind == ast.InputObject {
        return ""
    }
    return "marshal" + ref.UniquenessKey()
}

func (ref *TypeReference) UnmarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if !ref.Definition.IsInputType() {
        return ""
    }
    return "unmarshal" + ref.UniquenessKey()
}

func (ref *TypeReference) IsTargetNilable() bool {
    return IsNilable(ref.Target)
}

func (b *Binder) PushRef(ret *TypeReference) {
    b.References = append(b.References, ret)
}

func isMap(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Map)
    return ok
}

func isIntf(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Interface)
    return ok
}

func un
suffix:

types.Type) (types.Type, bool) {
    if t == nil {
        return t, false
    }
    named, ok := t.(*types.Named)
    if !ok {
        return t, false
    }
    if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
        return t, false
    }
    return named.TypeArgs().At(0), true
}

func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
    if innerType, ok := unwrapOmittable(bindTarget); ok {
        if schemaType.NonNull {
            return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
        }
        ref, err := b.TypeReference(schemaType, innerType)
        if err != nil {
            return nil, err
        }
        ref.IsOmittable = true
        return ref, err
    }

    if !isValid(bindTarget) {
        b.SawInvalid = true
        return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
    }

    var pkgName, typeName string
    def := b.schema.Types[schemaType.Name()]
    defer func() {
        if err == nil && ret != nil {
            b.PushRef(ret)
        }
    }()

    if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
        return nil, fmt.Errorf("%s was not found", schemaType.Name())
    }

    for _, model := range b.cfg.Models[schemaType.Name()].Model {
        if model == "map[string]interface{}" {
            if !isMap(bindTarget) {
                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         MapType,
            }, nil
        }
        if model == "interface{}" {
            if !isIntf(bindTarget) {
                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         InterfaceType,
            }, nil
        }

        pkgName, typeName = code.PkgAndType(model)
        if pkgName == "" {
            return nil, fmt.Errorf("missing package name for %s", schemaType.Name())
        }

        ref := &TypeReference{
            Definition: def,
            GQL:        schemaType,
        }

        obj, err := b.FindObject(pkgName, typeName)
        if err != nil {
            return nil, err
        }

        if fun, isFunc := obj.(*types.Func); isFunc {
            ref.GO = fun.Type().(*types.Signature).Params().At(0).Type()
            ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
            ref.Marshaler = fun
            ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil)
        } else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") {
            ref.GO = obj.Type()
            ref.IsContext = true
            ref.IsMarshaler = true
        } else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") {
            ref.GO = obj.Type()
            ref.IsMarshaler = true
        } else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
            // TODO delete before v1. Backwards compatibility case for named types wrapping strings (see #595)
            ref.GO = obj.Type()
            ref.CastType = underlying
            underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
            if err != nil {
                return nil, err
            }
            ref.Marshaler = underlyingRef.Marshaler
            ref.Unmarshaler = underlyingRef.Unmarshaler
        } else {
            ref.GO = obj.Type()
        }

        ref.Target = ref.GO
        ref.GO = b.CopyModifiersFromAst(schemaType, ref.GO)

        if bindTarget != nil {
            if err = code.CompatibleTypes(ref.GO, bindTarget); err != nil {
                continue
            }
            ref.GO = bindTarget
        }

        ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput

        return ref, nil
    }

    return nil, fmt.Errorf("%s is incompatible with %s", schemaType.Name(), bindTarget.String())
}

func isValid(t types.Type) bool {
    basic, isBasic := t.(*types.Basic)
    if !isBasic {
        return true
    }
    return basic.Kind() != types.Invalid
}

func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type {
    if t.Elem != nil {
        child := b.CopyModifiersFromAst(t.Elem, base)
        if _, isStruct := child.Underlying().(*types.Struct); isStruct && !b.cfg.OmitSliceElementPointers {
            child = types.NewPointer(child)
        }
        return types.NewSlice(child)
    }

    var isInterface bool
    if named, ok := base.(*types.Named); ok {
        _, isInterface = named.Underlying().(*types.Interface)
    }

    if !isInterface && !IsNilable(base) && !t.NonNull {
        return types.NewPointer(base)
    }
    return base
}

func IsNilable(t types.Type) bool {
    if namedType, isNamed := t.(*types.Named); isNamed {
        return IsNilable(namedType.Underlying())
    }
    _, isPtr := t.(*types.Pointer)
    _, isMap := t.(*types.Map)
    _, isInterface := t.(*types.Interface)
    _, isSlice := t.(*types.Slice)
    _, isChan := t.(*types.Chan)
    return isPtr || isMap || isInterface || isSlice || isChan
}

func hasMethod(it types.Type, name string) bool {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return false
    }
    for i := 0; i < namedType.NumMethods(); i++ {
        if namedType.Method(i).Name() == name {
            return true
        }
    }
    return false
}

func basicUnderlying(it types.Type) *types.Basic {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return nil
    }
    if basic, ok := namedType.Underlying().(*types.Basic); ok {
        return basic
    }
    return nil
}

var pkgReplacer = strings.NewReplacer(
    "/", "ᚋ",
    ".", "ᚗ",
    "-", "ᚑ",
    "~", "א",
)

func TypeIdentifier(t types.Type) string {
    res := ""
    for {
        switch it := t.(type) {
        case *types.Pointer:
            t.Underlying()
            res += "ᚖ"
            t = it.Elem()
        case *types.Slice:
            res += "ᚕ"
            t = it.Elem()
        case *types.Named:
            res += pkgReplacer.Replace(it.Obj().Pkg().Path())
            res += "ᚐ"
            res += it.Obj().Name()
            return res
        case *types.Basic:
            res += it.Name()
            return res
        case *types.Map:
            res += "map"
            return res
        case *types.Interface:
            res += "interface"
            return res
        default:
            panic(fmt.Errorf("unexpected type %T", it))
        }
    }
}
middle:

wrapOmittable(t
fim_type: identifier_name

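In this row the split lands mid-identifier: the prefix ends with "func un" and the middle supplies "wrapOmittable(t", the remainder of the function name plus the opening of its parameter list, which is what the identifier_name label records. Below is a sketch of packing such a row into a prefix-suffix-middle training string; the sentinel tokens are placeholders, not something this dump defines.

FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"  # placeholder sentinels

def to_psm(row):
    # Prefix-suffix-middle ordering: the model sees both sides of the hole,
    # then learns to emit the middle.
    return FIM_PREFIX + row["prefix"] + FIM_SUFFIX + row["suffix"] + FIM_MIDDLE + row["middle"]
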
file_name: binder.go
prefix:

package config

import (
    "errors"
    "fmt"
    "go/token"
    "go/types"
    "strings"

    "golang.org/x/tools/go/packages"

    "github.com/99designs/gqlgen/internal/code"
    "github.com/vektah/gqlparser/v2/ast"
)

var ErrTypeNotFound = errors.New("unable to find type")

// Binder connects graphql types to golang types using static analysis
type Binder struct {
    pkgs        *code.Packages
    schema      *ast.Schema
    cfg         *Config
    tctx        *types.Context
    References  []*TypeReference
    SawInvalid  bool
    objectCache map[string]map[string]types.Object
}

func (c *Config) NewBinder() *Binder {
    return &Binder{
        pkgs:   c.Packages,
        schema: c.Schema,
        cfg:    c,
    }
}

func (b *Binder) TypePosition(typ types.Type) token.Position {
    named, isNamed := typ.(*types.Named)
    if !isNamed {
        return token.Position{
            Filename: "unknown",
        }
    }
    return b.ObjectPosition(named.Obj())
}

func (b *Binder) ObjectPosition(typ types.Object) token.Position {
    if typ == nil {
        return token.Position{
            Filename: "unknown",
        }
    }
    pkg := b.pkgs.Load(typ.Pkg().Path())
    return pkg.Fset.Position(typ.Pos())
}

func (b *Binder) FindTypeFromName(name string) (types.Type, error) {
    pkgName, typeName := code.PkgAndType(name)
    return b.FindType(pkgName, typeName)
}

func (b *Binder) FindType(pkgName string, typeName string) (types.Type, error) {
    if pkgName == "" {
        if typeName == "map[string]interface{}" {
            return MapType, nil
        }
        if typeName == "interface{}" {
            return InterfaceType, nil
        }
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    if fun, isFunc := obj.(*types.Func); isFunc {
        return fun.Type().(*types.Signature).Params().At(0).Type(), nil
    }
    return obj.Type(), nil
}

func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) {
    if b.tctx == nil {
        b.tctx = types.NewContext()
    }
    return types.Instantiate(b.tctx, orig, targs, false)
}

var (
    MapType       = types.NewMap(types.Typ[types.String], types.NewInterfaceType(nil, nil).Complete())
    InterfaceType = types.NewInterfaceType(nil, nil)
)

func (b *Binder) DefaultUserObject(name string) (types.Type, error) {
    models := b.cfg.Models[name].Model
    if len(models) == 0 {
        return nil, fmt.Errorf(name + " not found in typemap")
    }
    if models[0] == "map[string]interface{}" {
        return MapType, nil
    }
    if models[0] == "interface{}" {
        return InterfaceType, nil
    }
    pkgName, typeName := code.PkgAndType(models[0])
    if pkgName == "" {
        return nil, fmt.Errorf("missing package name for %s", name)
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    return obj.Type(), nil
}

func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
    if pkgName == "" {
        return nil, fmt.Errorf("package cannot be nil")
    }
    pkg := b.pkgs.LoadWithTypes(pkgName)
    if pkg == nil {
        err := b.pkgs.Errors()
        if err != nil {
            return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
        }
        return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
    }
    if b.objectCache == nil {
        b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
    }
    defsIndex, ok := b.objectCache[pkgName]
    if !ok {
        defsIndex = indexDefs(pkg)
        b.objectCache[pkgName] = defsIndex
    }
    // function based marshalers take precedence
    if val, ok := defsIndex["Marshal"+typeName]; ok {
        return val, nil
    }
    if val, ok := defsIndex[typeName]; ok {
        return val, nil
    }
    return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}

func indexDefs(pkg *packages.Package) map[string]types.Object {
    res := make(map[string]types.Object)
    scope := pkg.Types.Scope()
    for astNode, def := range pkg.TypesInfo.Defs {
        // only look at defs in the top scope
        if def == nil {
            continue
        }
        parent := def.Parent()
        if parent == nil || parent != scope {
            continue
        }
        if _, ok := res[astNode.Name]; !ok {
            // The above check may not be really needed, it is only here to have a consistent behavior with
            // previous implementation of FindObject() function which only honored the first inclusion of a def.
            // If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
            res[astNode.Name] = def
        }
    }
    return res
}

func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
    newRef := *ref
    newRef.GO = types.NewPointer(ref.GO)
    b.References = append(b.References, &newRef)
    return &newRef
}

// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
    Definition  *ast.Definition
    GQL         *ast.Type
    GO          types.Type  // Type of the field being bound. Could be a pointer or a value type of Target.
    Target      types.Type  // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
    CastType    types.Type  // Before calling marshalling functions cast from/to this base type
    Marshaler   *types.Func // When using external marshalling functions this will point to the Marshal function
    Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
    IsMarshaler bool        // Does the type implement graphql.Marshaler and graphql.Unmarshaler
    IsOmittable bool        // Is the type wrapped with Omittable
    IsContext   bool        // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
    PointersInUmarshalInput bool // Inverse values and pointers in return.
}

func (ref *TypeReference) Elem() *TypeReference {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        newRef := *ref
        newRef.GO = p.Elem()
        return &newRef
    }
    if ref.IsSlice() {
        newRef := *ref
        newRef.GO = ref.GO.(*types.Slice).Elem()
        newRef.GQL = ref.GQL.Elem
        return &newRef
    }
    return nil
}

func (ref *TypeReference) IsPtr() bool {
    _, isPtr := ref.GO.(*types.Pointer)
    return isPtr
}

// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        _, isPtr := p.Elem().(*types.Pointer)
        return isPtr
    }
    return false
}

func (ref *TypeReference) IsNilable() bool {
    return IsNilable(ref.GO)
}

func (ref *TypeReference) IsSlice() bool {
    _, isSlice := ref.GO.(*types.Slice)
    return ref.GQL.Elem != nil && isSlice
}

func (ref *TypeReference) IsPtrToSlice() bool {
    if ref.IsPtr() {
        _, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
        return isPointerToSlice
    }
    return false
}

func (ref *TypeReference) IsPtrToIntf() bool {
    if ref.IsPtr() {
        _, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
        return isPointerToInterface
    }
    return false
}

func (ref *TypeReference) IsNamed() bool {
    _, isSlice := ref.GO.(*types.Named)
    return isSlice
}

func (ref *TypeReference) IsStruct() bool {
    _, isStruct := ref.GO.Underlying().(*types.Struct)
    return isStruct
}

func (ref *TypeReference) IsScalar() bool {
    return ref.Definition.Kind == ast.Scalar
}

func (ref *TypeReference) UniquenessKey() string {
    nullability := "O"
    if ref.GQL.NonNull {
        nullability = "N"
    }
    elemNullability := ""
    if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
        // Fix for #896
        elemNullability = "ᚄ"
    }
    return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}

func (ref *TypeReference) MarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if ref.Definition.Kind == ast.InputObject {
        return ""
    }
    return "marshal" + ref.UniquenessKey()
}

func (ref *TypeReference) UnmarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if !ref.Definition.IsInputType() {
        return ""
    }
    return "unmarshal" + ref.UniquenessKey()
}

func (ref *TypeReference) IsTargetNilable() bool {
    return IsNilable(ref.Target)
}

func (b *Binder) PushRef(ret *TypeReference) {
    b.References = append(b.References, ret)
}

func isMap(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Map)
    return ok
}

func isIntf(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Interface)
    return ok
}

func unwrapOmittable(t types.Type) (types.Type, bool) {
    if t == nil {
        return t, false
    }
    named, ok := t.(*types.Named)
    if !ok {
        return t, false
    }
    if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
        return t, false
    }
    return named.TypeArgs().At(0), true
}

func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
    if innerType, ok := unwrapOmittable(bindTarget); ok {
        if schemaType.NonNull {
            return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
        }
        ref, err := b.TypeReference(schemaType, innerType)
        if err != nil {
            return nil, err
        }
        ref.IsOmittable = true
        return ref, err
    }

    if !isValid(bindTarget) {
        b.SawInvalid = true
        return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
    }

    var pkgName, typeName string
    def := b.schema.Types[schemaType.Name()]
    defer func() {
        if err == nil && ret != nil {
            b.PushRef(ret)
        }
    }()

    if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
        return nil, fmt.Errorf("%s was not found", schemaType.Name())
    }

    for _, model := range b.cfg.Models[schemaType.Name()].Model {
        if model == "map[string]interface{}" {
            if !isMap(bindTarget) {
                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         MapType,
            }, nil
        }
suffix:

                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         InterfaceType,
            }, nil
        }

        pkgName, typeName = code.PkgAndType(model)
        if pkgName == "" {
            return nil, fmt.Errorf("missing package name for %s", schemaType.Name())
        }

        ref := &TypeReference{
            Definition: def,
            GQL:        schemaType,
        }

        obj, err := b.FindObject(pkgName, typeName)
        if err != nil {
            return nil, err
        }

        if fun, isFunc := obj.(*types.Func); isFunc {
            ref.GO = fun.Type().(*types.Signature).Params().At(0).Type()
            ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
            ref.Marshaler = fun
            ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil)
        } else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") {
            ref.GO = obj.Type()
            ref.IsContext = true
            ref.IsMarshaler = true
        } else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") {
            ref.GO = obj.Type()
            ref.IsMarshaler = true
        } else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
            // TODO delete before v1. Backwards compatibility case for named types wrapping strings (see #595)
            ref.GO = obj.Type()
            ref.CastType = underlying
            underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
            if err != nil {
                return nil, err
            }
            ref.Marshaler = underlyingRef.Marshaler
            ref.Unmarshaler = underlyingRef.Unmarshaler
        } else {
            ref.GO = obj.Type()
        }

        ref.Target = ref.GO
        ref.GO = b.CopyModifiersFromAst(schemaType, ref.GO)

        if bindTarget != nil {
            if err = code.CompatibleTypes(ref.GO, bindTarget); err != nil {
                continue
            }
            ref.GO = bindTarget
        }

        ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput

        return ref, nil
    }

    return nil, fmt.Errorf("%s is incompatible with %s", schemaType.Name(), bindTarget.String())
}

func isValid(t types.Type) bool {
    basic, isBasic := t.(*types.Basic)
    if !isBasic {
        return true
    }
    return basic.Kind() != types.Invalid
}

func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type {
    if t.Elem != nil {
        child := b.CopyModifiersFromAst(t.Elem, base)
        if _, isStruct := child.Underlying().(*types.Struct); isStruct && !b.cfg.OmitSliceElementPointers {
            child = types.NewPointer(child)
        }
        return types.NewSlice(child)
    }

    var isInterface bool
    if named, ok := base.(*types.Named); ok {
        _, isInterface = named.Underlying().(*types.Interface)
    }

    if !isInterface && !IsNilable(base) && !t.NonNull {
        return types.NewPointer(base)
    }
    return base
}

func IsNilable(t types.Type) bool {
    if namedType, isNamed := t.(*types.Named); isNamed {
        return IsNilable(namedType.Underlying())
    }
    _, isPtr := t.(*types.Pointer)
    _, isMap := t.(*types.Map)
    _, isInterface := t.(*types.Interface)
    _, isSlice := t.(*types.Slice)
    _, isChan := t.(*types.Chan)
    return isPtr || isMap || isInterface || isSlice || isChan
}

func hasMethod(it types.Type, name string) bool {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return false
    }
    for i := 0; i < namedType.NumMethods(); i++ {
        if namedType.Method(i).Name() == name {
            return true
        }
    }
    return false
}

func basicUnderlying(it types.Type) *types.Basic {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return nil
    }
    if basic, ok := namedType.Underlying().(*types.Basic); ok {
        return basic
    }
    return nil
}

var pkgReplacer = strings.NewReplacer(
    "/", "ᚋ",
    ".", "ᚗ",
    "-", "ᚑ",
    "~", "א",
)

func TypeIdentifier(t types.Type) string {
    res := ""
    for {
        switch it := t.(type) {
        case *types.Pointer:
            t.Underlying()
            res += "ᚖ"
            t = it.Elem()
        case *types.Slice:
            res += "ᚕ"
            t = it.Elem()
        case *types.Named:
            res += pkgReplacer.Replace(it.Obj().Pkg().Path())
            res += "ᚐ"
            res += it.Obj().Name()
            return res
        case *types.Basic:
            res += it.Name()
            return res
        case *types.Map:
            res += "map"
            return res
        case *types.Interface:
            res += "interface"
            return res
        default:
            panic(fmt.Errorf("unexpected type %T", it))
        }
    }
}
if model == "interface{}" { if !isIntf(bindTarget) {
fim_type: random_line_split

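random_line_split, by contrast, cuts the middle out at line granularity rather than at a syntactic unit; in this row it removed the two lines that open the interface{} branch. A plausible way such splits are produced (an assumption, not this dataset's actual tooling):

import random

def split_random_lines(text, max_middle_lines=4):
    # Pick a random contiguous run of lines as the middle span.
    lines = text.splitlines(keepends=True)
    start = random.randrange(len(lines))          # assumes non-empty text
    end = min(len(lines), start + random.randint(1, max_middle_lines))
    return "".join(lines[:start]), "".join(lines[start:end]), "".join(lines[end:])
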
file_name: binder.go
prefix:

package config

import (
    "errors"
    "fmt"
    "go/token"
    "go/types"
    "strings"

    "golang.org/x/tools/go/packages"

    "github.com/99designs/gqlgen/internal/code"
    "github.com/vektah/gqlparser/v2/ast"
)

var ErrTypeNotFound = errors.New("unable to find type")

// Binder connects graphql types to golang types using static analysis
type Binder struct {
    pkgs        *code.Packages
    schema      *ast.Schema
    cfg         *Config
    tctx        *types.Context
    References  []*TypeReference
    SawInvalid  bool
    objectCache map[string]map[string]types.Object
}

func (c *Config) NewBinder() *Binder {
    return &Binder{
        pkgs:   c.Packages,
        schema: c.Schema,
        cfg:    c,
    }
}

func (b *Binder) TypePosition(typ types.Type) token.Position {
    named, isNamed := typ.(*types.Named)
    if !isNamed {
        return token.Position{
            Filename: "unknown",
        }
    }
    return b.ObjectPosition(named.Obj())
}

func (b *Binder) ObjectPosition(typ types.Object) token.Position {
    if typ == nil {
        return token.Position{
            Filename: "unknown",
        }
    }
    pkg := b.pkgs.Load(typ.Pkg().Path())
    return pkg.Fset.Position(typ.Pos())
}

func (b *Binder) FindTypeFromName(name string) (types.Type, error) {
    pkgName, typeName := code.PkgAndType(name)
    return b.FindType(pkgName, typeName)
}

func (b *Binder) FindType(pkgName string, typeName string) (types.Type, error) {
    if pkgName == "" {
        if typeName == "map[string]interface{}" {
            return MapType, nil
        }
        if typeName == "interface{}" {
            return InterfaceType, nil
        }
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    if fun, isFunc := obj.(*types.Func); isFunc {
        return fun.Type().(*types.Signature).Params().At(0).Type(), nil
    }
    return obj.Type(), nil
}

func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) {
    if b.tctx == nil {
        b.tctx = types.NewContext()
    }
    return types.Instantiate(b.tctx, orig, targs, false)
}

var (
    MapType       = types.NewMap(types.Typ[types.String], types.NewInterfaceType(nil, nil).Complete())
    InterfaceType = types.NewInterfaceType(nil, nil)
)

func (b *Binder) DefaultUserObject(name string) (types.Type, error) {
    models := b.cfg.Models[name].Model
    if len(models) == 0 {
        return nil, fmt.Errorf(name + " not found in typemap")
    }
    if models[0] == "map[string]interface{}" {
        return MapType, nil
    }
    if models[0] == "interface{}" {
        return InterfaceType, nil
    }
    pkgName, typeName := code.PkgAndType(models[0])
    if pkgName == "" {
        return nil, fmt.Errorf("missing package name for %s", name)
    }
    obj, err := b.FindObject(pkgName, typeName)
    if err != nil {
        return nil, err
    }
    return obj.Type(), nil
}

func (b *Binder) FindObject(pkgName string, typeName string) (types.Object, error) {
    if pkgName == "" {
        return nil, fmt.Errorf("package cannot be nil")
    }
    pkg := b.pkgs.LoadWithTypes(pkgName)
    if pkg == nil {
        err := b.pkgs.Errors()
        if err != nil {
            return nil, fmt.Errorf("package could not be loaded: %s.%s: %w", pkgName, typeName, err)
        }
        return nil, fmt.Errorf("required package was not loaded: %s.%s", pkgName, typeName)
    }
    if b.objectCache == nil {
        b.objectCache = make(map[string]map[string]types.Object, b.pkgs.Count())
    }
    defsIndex, ok := b.objectCache[pkgName]
    if !ok {
        defsIndex = indexDefs(pkg)
        b.objectCache[pkgName] = defsIndex
    }
    // function based marshalers take precedence
    if val, ok := defsIndex["Marshal"+typeName]; ok {
        return val, nil
    }
    if val, ok := defsIndex[typeName]; ok {
        return val, nil
    }
    return nil, fmt.Errorf("%w: %s.%s", ErrTypeNotFound, pkgName, typeName)
}

func indexDefs(pkg *packages.Package) map[string]types.Object {
    res := make(map[string]types.Object)
    scope := pkg.Types.Scope()
    for astNode, def := range pkg.TypesInfo.Defs {
        // only look at defs in the top scope
        if def == nil {
            continue
        }
        parent := def.Parent()
        if parent == nil || parent != scope {
            continue
        }
        if _, ok := res[astNode.Name]; !ok {
            // The above check may not be really needed, it is only here to have a consistent behavior with
            // previous implementation of FindObject() function which only honored the first inclusion of a def.
            // If this is still needed, we can consider something like sync.Map.LoadOrStore() to avoid two lookups.
            res[astNode.Name] = def
        }
    }
    return res
}

func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
    newRef := *ref
    newRef.GO = types.NewPointer(ref.GO)
    b.References = append(b.References, &newRef)
    return &newRef
}

// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
    Definition  *ast.Definition
    GQL         *ast.Type
    GO          types.Type  // Type of the field being bound. Could be a pointer or a value type of Target.
    Target      types.Type  // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
    CastType    types.Type  // Before calling marshalling functions cast from/to this base type
    Marshaler   *types.Func // When using external marshalling functions this will point to the Marshal function
    Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
    IsMarshaler bool        // Does the type implement graphql.Marshaler and graphql.Unmarshaler
    IsOmittable bool        // Is the type wrapped with Omittable
    IsContext   bool        // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
    PointersInUmarshalInput bool // Inverse values and pointers in return.
}

func (ref *TypeReference) Elem() *TypeReference {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        newRef := *ref
        newRef.GO = p.Elem()
        return &newRef
    }
    if ref.IsSlice() {
        newRef := *ref
        newRef.GO = ref.GO.(*types.Slice).Elem()
        newRef.GQL = ref.GQL.Elem
        return &newRef
    }
    return nil
}

func (ref *TypeReference) IsPtr() bool {
    _, isPtr := ref.GO.(*types.Pointer)
    return isPtr
}

// fix for https://github.com/golang/go/issues/31103 may make it possible to remove this (may still be useful)
func (ref *TypeReference) IsPtrToPtr() bool {
    if p, isPtr := ref.GO.(*types.Pointer); isPtr {
        _, isPtr := p.Elem().(*types.Pointer)
        return isPtr
    }
    return false
}

func (ref *TypeReference) IsNilable() bool {
    return IsNilable(ref.GO)
}

func (ref *TypeReference) IsSlice() bool {
    _, isSlice := ref.GO.(*types.Slice)
    return ref.GQL.Elem != nil && isSlice
}

func (ref *TypeReference) IsPtrToSlice() bool {
    if ref.IsPtr() {
        _, isPointerToSlice := ref.GO.(*types.Pointer).Elem().(*types.Slice)
        return isPointerToSlice
    }
    return false
}

func (ref *TypeReference) IsPtrToIntf() bool {
    if ref.IsPtr() {
        _, isPointerToInterface := ref.GO.(*types.Pointer).Elem().(*types.Interface)
        return isPointerToInterface
    }
    return false
}

func (ref *TypeReference) IsNamed() bool {
    _, isSlice := ref.GO.(*types.Named)
    return isSlice
}

func (ref *TypeReference) IsStruct() bool {
    _, isStruct := ref.GO.Underlying().(*types.Struct)
    return isStruct
}

func (ref *TypeReference) IsScalar() bool {
    return ref.Definition.Kind == ast.Scalar
}

func (ref *TypeReference) UniquenessKey() string {
    nullability := "O"
    if ref.GQL.NonNull {
        nullability = "N"
    }
    elemNullability := ""
    if ref.GQL.Elem != nil && ref.GQL.Elem.NonNull {
        // Fix for #896
        elemNullability = "ᚄ"
    }
    return nullability + ref.Definition.Name + "2" + TypeIdentifier(ref.GO) + elemNullability
}

func (ref *TypeReference) MarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if ref.Definition.Kind == ast.InputObject {
        return ""
    }
    return "marshal" + ref.UniquenessKey()
}

func (ref *TypeReference) UnmarshalFunc() string {
    if ref.Definition == nil {
        panic(errors.New("Definition missing for " + ref.GQL.Name()))
    }
    if !ref.Definition.IsInputType() {
        return ""
    }
    return "unmarshal" + ref.UniquenessKey()
}

func (ref *TypeReference) IsTargetNilable() bool {
    return IsNilable(ref.Target)
}

func (b *Binder) PushRef(ret *TypeReference) {
    b.References = append(b.References, ret)
}

func isMap(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Map)
    return ok
}

func isIntf(t types.Type) bool {
    if t == nil {
        return true
    }
    _, ok := t.(*types.Interface)
    return ok
}

func unwrapOmittable(t types.Type) (types.Type, bool) {
    if t == nil {
suffix:

    named, ok := t.(*types.Named)
    if !ok {
        return t, false
    }
    if named.Origin().String() != "github.com/99designs/gqlgen/graphql.Omittable[T any]" {
        return t, false
    }
    return named.TypeArgs().At(0), true
}

func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
    if innerType, ok := unwrapOmittable(bindTarget); ok {
        if schemaType.NonNull {
            return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
        }
        ref, err := b.TypeReference(schemaType, innerType)
        if err != nil {
            return nil, err
        }
        ref.IsOmittable = true
        return ref, err
    }

    if !isValid(bindTarget) {
        b.SawInvalid = true
        return nil, fmt.Errorf("%s has an invalid type", schemaType.Name())
    }

    var pkgName, typeName string
    def := b.schema.Types[schemaType.Name()]
    defer func() {
        if err == nil && ret != nil {
            b.PushRef(ret)
        }
    }()

    if len(b.cfg.Models[schemaType.Name()].Model) == 0 {
        return nil, fmt.Errorf("%s was not found", schemaType.Name())
    }

    for _, model := range b.cfg.Models[schemaType.Name()].Model {
        if model == "map[string]interface{}" {
            if !isMap(bindTarget) {
                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         MapType,
            }, nil
        }
        if model == "interface{}" {
            if !isIntf(bindTarget) {
                continue
            }
            return &TypeReference{
                Definition: def,
                GQL:        schemaType,
                GO:         InterfaceType,
            }, nil
        }

        pkgName, typeName = code.PkgAndType(model)
        if pkgName == "" {
            return nil, fmt.Errorf("missing package name for %s", schemaType.Name())
        }

        ref := &TypeReference{
            Definition: def,
            GQL:        schemaType,
        }

        obj, err := b.FindObject(pkgName, typeName)
        if err != nil {
            return nil, err
        }

        if fun, isFunc := obj.(*types.Func); isFunc {
            ref.GO = fun.Type().(*types.Signature).Params().At(0).Type()
            ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
            ref.Marshaler = fun
            ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil)
        } else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") {
            ref.GO = obj.Type()
            ref.IsContext = true
            ref.IsMarshaler = true
        } else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") {
            ref.GO = obj.Type()
            ref.IsMarshaler = true
        } else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
            // TODO delete before v1. Backwards compatibility case for named types wrapping strings (see #595)
            ref.GO = obj.Type()
            ref.CastType = underlying
            underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
            if err != nil {
                return nil, err
            }
            ref.Marshaler = underlyingRef.Marshaler
            ref.Unmarshaler = underlyingRef.Unmarshaler
        } else {
            ref.GO = obj.Type()
        }

        ref.Target = ref.GO
        ref.GO = b.CopyModifiersFromAst(schemaType, ref.GO)

        if bindTarget != nil {
            if err = code.CompatibleTypes(ref.GO, bindTarget); err != nil {
                continue
            }
            ref.GO = bindTarget
        }

        ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput

        return ref, nil
    }

    return nil, fmt.Errorf("%s is incompatible with %s", schemaType.Name(), bindTarget.String())
}

func isValid(t types.Type) bool {
    basic, isBasic := t.(*types.Basic)
    if !isBasic {
        return true
    }
    return basic.Kind() != types.Invalid
}

func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type {
    if t.Elem != nil {
        child := b.CopyModifiersFromAst(t.Elem, base)
        if _, isStruct := child.Underlying().(*types.Struct); isStruct && !b.cfg.OmitSliceElementPointers {
            child = types.NewPointer(child)
        }
        return types.NewSlice(child)
    }

    var isInterface bool
    if named, ok := base.(*types.Named); ok {
        _, isInterface = named.Underlying().(*types.Interface)
    }

    if !isInterface && !IsNilable(base) && !t.NonNull {
        return types.NewPointer(base)
    }
    return base
}

func IsNilable(t types.Type) bool {
    if namedType, isNamed := t.(*types.Named); isNamed {
        return IsNilable(namedType.Underlying())
    }
    _, isPtr := t.(*types.Pointer)
    _, isMap := t.(*types.Map)
    _, isInterface := t.(*types.Interface)
    _, isSlice := t.(*types.Slice)
    _, isChan := t.(*types.Chan)
    return isPtr || isMap || isInterface || isSlice || isChan
}

func hasMethod(it types.Type, name string) bool {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return false
    }
    for i := 0; i < namedType.NumMethods(); i++ {
        if namedType.Method(i).Name() == name {
            return true
        }
    }
    return false
}

func basicUnderlying(it types.Type) *types.Basic {
    if ptr, isPtr := it.(*types.Pointer); isPtr {
        it = ptr.Elem()
    }
    namedType, ok := it.(*types.Named)
    if !ok {
        return nil
    }
    if basic, ok := namedType.Underlying().(*types.Basic); ok {
        return basic
    }
    return nil
}

var pkgReplacer = strings.NewReplacer(
    "/", "ᚋ",
    ".", "ᚗ",
    "-", "ᚑ",
    "~", "א",
)

func TypeIdentifier(t types.Type) string {
    res := ""
    for {
        switch it := t.(type) {
        case *types.Pointer:
            t.Underlying()
            res += "ᚖ"
            t = it.Elem()
        case *types.Slice:
            res += "ᚕ"
            t = it.Elem()
        case *types.Named:
            res += pkgReplacer.Replace(it.Obj().Pkg().Path())
            res += "ᚐ"
            res += it.Obj().Name()
            return res
        case *types.Basic:
            res += it.Name()
            return res
        case *types.Map:
            res += "map"
            return res
        case *types.Interface:
            res += "interface"
            return res
        default:
            panic(fmt.Errorf("unexpected type %T", it))
        }
    }
}
middle:

        return t, false
    }
fim_type: conditional_block

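Here the masked middle is the body of a conditional: the two lines completing the `if t == nil {` block. Rows like this can be pulled out by label, reusing the `ds` handle from the loading sketch near the top:

# Keep only rows whose middle completes a conditional body.
cond_rows = ds.filter(lambda r: r["fim_type"] == "conditional_block")
print(len(cond_rows))
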
file_name: tools.py
prefix:

# !/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Codes are borrowed from
https://github.com/facebookresearch/SentEval/blob/master/senteval/tools/classifier.py
and https://github.com/ganeshjawahar/interpret_bert
with small modifications and our own implementation of a simple self-attention layer

Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""

from __future__ import absolute_import, division, unicode_literals

import os
import sys

root_path = '/'.join(os.path.realpath(__file__).split('/')[:-2])
if root_path not in sys.path:
    sys.path.append(root_path)

import numpy as np
import copy
from senteval import utils

import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm


class PyTorchClassifier(object):
    def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
                 cudaEfficient=False):
        # fix seed
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        self.inputdim = inputdim
        self.nclasses = nclasses
        self.l2reg = l2reg
        self.batch_size = batch_size
        self.cudaEfficient = cudaEfficient

    def prepare_data(self, args, features):
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index,
                                  all_segment_ids, all_label_ids)
        eval_sampler = RandomSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
        return eval_dataloader, eval_sampler

    def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y,
            validation_split=None, early_stop=True):
        self.nepoch = 0
        bestaccuracy = -1
        stop_train = False
        early_stop_count = 0

        # Preparing validation data
        train_dataloader, train_sampler = self.prepare_data(args, train_x)

        # Training
        while not stop_train and self.nepoch <= self.max_epoch:
            self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
            accuracy = self.score(args, model, tokenizer, dev_x)
            if accuracy > bestaccuracy:
                bestaccuracy = accuracy
                bestmodel = copy.deepcopy(self.model)
            elif early_stop:
                if early_stop_count >= self.tenacity:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        return bestaccuracy

    def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step=50):
        all_costs = []
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(args.device) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[3]}
                ybatch = batch[4]
                with torch.no_grad():
                    _, _, all_encoder_layers = model(**inputs)
                layer_output = all_encoder_layers[args.layer]
                self.model.train()
                output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size

    def score(self, args, model, tokenizer, dev_x):
        dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
        self.model.eval()
        correct = 0
        all = 0
        with torch.no_grad():
            for step, batch in enumerate(dev_dataloader):
                batch = tuple(t.to(args.device) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[3]}
                ybatch = batch[4]
                with torch.no_grad():
                    _, _, all_encoder_layers = model(**inputs)
                layer_output = all_encoder_layers[args.layer]
                output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
                output_pred = output.cpu().data.tolist()
                pred = []
                for p in output_pred:
                    pred.append(0 if p[0] > p[1] else 1)
                yb = ybatch.data.tolist()
                for p, g in zip(pred, yb):
                    all += 1
                    if p == g:
                        correct += 1
        accuracy = 1.0 * correct / all
        return accuracy

    def predict(self, devX):
        self.model.eval()
        devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
        yhat = np.vstack(yhat)
        return yhat

    def predict_proba(self, devX):
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                vals = F.softmax(self.model(Xbatch), dim=1).data.cpu().numpy()
                if len(probas) == 0:
""" MLP with Pytorch (nhid=0 --> Logistic Regression) """ class MLP(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) -tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] if params["nhid"] == 0: self.model = nn.Sequential( nn.Linear(self.inputdim, self.nclasses), ).cuda() else: self.model = nn.Sequential( nn.Linear(self.inputdim, params["nhid"]), nn.Dropout(p=self.dropout), nn.Sigmoid(), nn.Linear(params["nhid"], self.nclasses), ).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn_mlp(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) -tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. 
if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] self.model = self_attn(self.inputdim, self.dropout, self.nhid, nclasses).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn(nn.Module): def __init__(self, input_dim, dropout, nhid, nlabels): super(self.__class__, self).__init__() self._input_dim = input_dim self.Ws1 = nn.Linear(input_dim, input_dim // 2) self.Ws2 = nn.Linear(input_dim // 2, 1) self.dropout = dropout self.tanh = nn.Tanh() self.attn_dropout = nn.Dropout(p=self.dropout) self.attn_softmax = nn.Softmax(dim=1) self.cls = nn.Linear(input_dim, nlabels) def forward(self, Xbatch, mask): value = self.attn_dropout(self.Ws1(Xbatch)) attn_score = self.Ws2(self.tanh(value)) mask = (1.0 - mask.unsqueeze(2)) * -10000.0 attn_score = attn_score + mask attn_score = self.attn_softmax(attn_score) attn_output = torch.bmm(attn_score.transpose(1,2), Xbatch).squeeze(1) output = self.cls(attn_output) return output
middle:

                    probas = vals
                else:
                    probas = np.concatenate((probas, vals), axis=0)
        return probas
fim_type: random_line_split

file_name: tools.py
prefix:

# !/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Codes are borrowed from
https://github.com/facebookresearch/SentEval/blob/master/senteval/tools/classifier.py
and https://github.com/ganeshjawahar/interpret_bert
with small modifications and our own implementation of a simple self-attention layer

Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""

from __future__ import absolute_import, division, unicode_literals

import os
import sys

root_path = '/'.join(os.path.realpath(__file__).split('/')[:-2])
if root_path not in sys.path:
    sys.path.append(root_path)

import numpy as np
import copy
from senteval import utils

import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler
import torch.nn.functional as F
from tqdm import tqdm


class PyTorchClassifier(object):
    def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
                 cudaEfficient=False):
        # fix seed
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

        self.inputdim = inputdim
        self.nclasses = nclasses
        self.l2reg = l2reg
        self.batch_size = batch_size
        self.cudaEfficient = cudaEfficient

    def prepare_data(self, args, features):
suffix:

    def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y,
            validation_split=None, early_stop=True):
        self.nepoch = 0
        bestaccuracy = -1
        stop_train = False
        early_stop_count = 0

        # Preparing validation data
        train_dataloader, train_sampler = self.prepare_data(args, train_x)

        # Training
        while not stop_train and self.nepoch <= self.max_epoch:
            self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size)
            accuracy = self.score(args, model, tokenizer, dev_x)
            if accuracy > bestaccuracy:
                bestaccuracy = accuracy
                bestmodel = copy.deepcopy(self.model)
            elif early_stop:
                if early_stop_count >= self.tenacity:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        return bestaccuracy

    def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step=50):
        all_costs = []
        for _ in range(self.nepoch, self.nepoch + epoch_size):
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(args.device) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[3]}
                ybatch = batch[4]
                with torch.no_grad():
                    _, _, all_encoder_layers = model(**inputs)
                layer_output = all_encoder_layers[args.layer]
                self.model.train()
                output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += epoch_size

    def score(self, args, model, tokenizer, dev_x):
        dev_dataloader, dev_sampler = self.prepare_data(args, dev_x)
        self.model.eval()
        correct = 0
        all = 0
        with torch.no_grad():
            for step, batch in enumerate(dev_dataloader):
                batch = tuple(t.to(args.device) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[3]}
                ybatch = batch[4]
                with torch.no_grad():
                    _, _, all_encoder_layers = model(**inputs)
                layer_output = all_encoder_layers[args.layer]
                output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
                output_pred = output.cpu().data.tolist()
                pred = []
                for p in output_pred:
                    pred.append(0 if p[0] > p[1] else 1)
                yb = ybatch.data.tolist()
                for p, g in zip(pred, yb):
                    all += 1
                    if p == g:
                        correct += 1
        accuracy = 1.0 * correct / all
        return accuracy

    def predict(self, devX):
        self.model.eval()
        devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
        yhat = np.vstack(yhat)
        return yhat

    def predict_proba(self, devX):
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                vals = F.softmax(self.model(Xbatch), dim=1).data.cpu().numpy()
                if len(probas) == 0:
                    probas = vals
                else:
                    probas = np.concatenate((probas, vals), axis=0)
        return probas


"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""


class MLP(PyTorchClassifier):
    def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
                                             batch_size, seed, cudaEfficient)
        """
        PARAMETERS:
        -nhid:       number of hidden units (0: Logistic Regression)
        -optim:      optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
        -tenacity:   how many times dev acc does not increase before stopping
        -epoch_size: each epoch corresponds to epoch_size pass on the train set
        -max_epoch:  max number of epoches
        -dropout:    dropout for MLP
        """
        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]

        if params["nhid"] == 0:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, self.nclasses),
            ).cuda()
        else:
            self.model = nn.Sequential(
                nn.Linear(self.inputdim, params["nhid"]),
                nn.Dropout(p=self.dropout),
                nn.Sigmoid(),
                nn.Linear(params["nhid"], self.nclasses),
            ).cuda()

        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False

        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg


class self_attn_mlp(PyTorchClassifier):
    def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
                 seed=1111, cudaEfficient=False):
        super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
                                             batch_size, seed, cudaEfficient)
        """
        PARAMETERS:
        -nhid:       number of hidden units (0: Logistic Regression)
        -optim:      optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
        -tenacity:   how many times dev acc does not increase before stopping
        -epoch_size: each epoch corresponds to epoch_size pass on the train set
        -max_epoch:  max number of epoches
        -dropout:    dropout for MLP
        """
        self.nhid = 0 if "nhid" not in params else params["nhid"]
        self.optim = "adam" if "optim" not in params else params["optim"]
        self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
        self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"]
        self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
        self.dropout = 0. if "dropout" not in params else params["dropout"]
        self.batch_size = 64 if "batch_size" not in params else params["batch_size"]

        self.model = self_attn(self.inputdim, self.dropout, self.nhid, nclasses).cuda()

        self.loss_fn = nn.CrossEntropyLoss().cuda()
        self.loss_fn.size_average = False

        optim_fn, optim_params = utils.get_optimizer(self.optim)
        self.optimizer = optim_fn(self.model.parameters(), **optim_params)
        self.optimizer.param_groups[0]['weight_decay'] = self.l2reg


class self_attn(nn.Module):
    def __init__(self, input_dim, dropout, nhid, nlabels):
        super(self.__class__, self).__init__()
        self._input_dim = input_dim
        self.Ws1 = nn.Linear(input_dim, input_dim // 2)
        self.Ws2 = nn.Linear(input_dim // 2, 1)
        self.dropout = dropout
        self.tanh = nn.Tanh()
        self.attn_dropout = nn.Dropout(p=self.dropout)
        self.attn_softmax = nn.Softmax(dim=1)
        self.cls = nn.Linear(input_dim, nlabels)

    def forward(self, Xbatch, mask):
        value = self.attn_dropout(self.Ws1(Xbatch))
        attn_score = self.Ws2(self.tanh(value))
        mask = (1.0 - mask.unsqueeze(2)) * -10000.0
        attn_score = attn_score + mask
        attn_score = self.attn_softmax(attn_score)
        attn_output = torch.bmm(attn_score.transpose(1, 2), Xbatch).squeeze(1)
        output = self.cls(attn_output)
        return output
middle:

        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index,
                                  all_segment_ids, all_label_ids)
        eval_sampler = RandomSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size)
        return eval_dataloader, eval_sampler
identifier_body
tools.py
# !/usr/bin/env python # -*- coding: utf-8 -*- """ Codes are borrowed from https://github.com/facebookresearch/SentEval/blob/master/senteval/tools/classifier.py and https://github.com/ganeshjawahar/interpret_bert with small modifications and our own implement of a simple self-attention layer Pytorch Classifier class in the style of scikit-learn Classifiers include Logistic Regression and MLP """ from __future__ import absolute_import, division, unicode_literals import os import sys root_path = '/'.join(os.path.realpath(__file__).split('/')[:-2]) if root_path not in sys.path: sys.path.append(root_path) import numpy as np import copy from senteval import utils import torch from torch import nn from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler import torch.nn.functional as F from tqdm import tqdm class PyTorchClassifier(object): def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): # fix seed np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) self.inputdim = inputdim self.nclasses = nclasses self.l2reg = l2reg self.batch_size = batch_size self.cudaEfficient = cudaEfficient def prepare_data(self, args, features): all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids) eval_sampler = RandomSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) return eval_dataloader, eval_sampler def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None, early_stop=True): self.nepoch = 0 bestaccuracy = -1 stop_train = False early_stop_count = 0 # Preparing validation data train_dataloader, train_sampler = self.prepare_data(args, train_x) # Training while not stop_train and self.nepoch <= self.max_epoch: self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size) accuracy = self.score(args, model, tokenizer, dev_x) if accuracy > bestaccuracy: bestaccuracy = accuracy bestmodel = copy.deepcopy(self.model) elif early_stop: if early_stop_count >= self.tenacity: stop_train = True early_stop_count += 1 self.model = bestmodel return bestaccuracy def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50): all_costs = [] for _ in range(self.nepoch, self.nepoch + epoch_size):
self.nepoch += epoch_size def score(self, args, model, tokenizer, dev_x): dev_dataloader, dev_sampler = self.prepare_data(args, dev_x) self.model.eval() correct = 0 all = 0 with torch.no_grad(): for step, batch in enumerate(dev_dataloader): batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[3]} ybatch = batch[4] with torch.no_grad(): _, _, all_encoder_layers = model(**inputs) layer_output = all_encoder_layers[args.layer] output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor)) output_pred = output.cpu().data.tolist() pred = [] for p in output_pred: pred.append(0 if p[0] > p[1] else 1) yb = ybatch.data.tolist() for p, g in zip(pred, yb): all += 1 if p == g: correct += 1 accuracy = 1.0 * correct / all return accuracy def predict(self, devX): self.model.eval() devX = torch.FloatTensor(devX).cuda() yhat = np.array([]) with torch.no_grad(): for i in range(0, len(devX), self.batch_size): Xbatch = devX[i:i + self.batch_size] output = self.model(Xbatch) yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy()) yhat = np.vstack(yhat) return yhat def predict_proba(self, devX): self.model.eval() probas = [] with torch.no_grad(): for i in range(0, len(devX), self.batch_size): Xbatch = devX[i:i + self.batch_size] vals = F.softmax(self.model(Xbatch).data.cpu().numpy()) if not probas: probas = vals else: probas = np.concatenate(probas, vals, axis=0) return probas """ MLP with Pytorch (nhid=0 --> Logistic Regression) """ class MLP(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) -tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] if params["nhid"] == 0: self.model = nn.Sequential( nn.Linear(self.inputdim, self.nclasses), ).cuda() else: self.model = nn.Sequential( nn.Linear(self.inputdim, params["nhid"]), nn.Dropout(p=self.dropout), nn.Sigmoid(), nn.Linear(params["nhid"], self.nclasses), ).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn_mlp(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) 
-tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] self.model = self_attn(self.inputdim, self.dropout, self.nhid, nclasses).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn(nn.Module): def __init__(self, input_dim, dropout, nhid, nlabels): super(self.__class__, self).__init__() self._input_dim = input_dim self.Ws1 = nn.Linear(input_dim, input_dim // 2) self.Ws2 = nn.Linear(input_dim // 2, 1) self.dropout = dropout self.tanh = nn.Tanh() self.attn_dropout = nn.Dropout(p=self.dropout) self.attn_softmax = nn.Softmax(dim=1) self.cls = nn.Linear(input_dim, nlabels) def forward(self, Xbatch, mask): value = self.attn_dropout(self.Ws1(Xbatch)) attn_score = self.Ws2(self.tanh(value)) mask = (1.0 - mask.unsqueeze(2)) * -10000.0 attn_score = attn_score + mask attn_score = self.attn_softmax(attn_score) attn_output = torch.bmm(attn_score.transpose(1,2), Xbatch).squeeze(1) output = self.cls(attn_output) return output
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(args.device) for t in batch)
                inputs = {'input_ids': batch[0],
                          'attention_mask': batch[1],
                          'token_type_ids': batch[3]}
                ybatch = batch[4]
                # The pretrained encoder stays frozen: its forward pass runs under
                # no_grad, so only the probing classifier below receives gradients.
                with torch.no_grad():
                    _, _, all_encoder_layers = model(**inputs)
                layer_output = all_encoder_layers[args.layer]
                self.model.train()
                # The attention mask is cast to float so the probe can use it additively.
                output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor))
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
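This inner loop is driven by fit above, which keeps the best dev model and stops after tenacity non-improving evaluations; note that the counter is never reset when accuracy improves. A self-contained sketch of that control flow (early_stopping_demo and the accuracy values are invented for illustration):

def early_stopping_demo(dev_accuracies, tenacity=5):
    # Mirrors PyTorchClassifier.fit: keep the best score, count every
    # non-improving evaluation, and stop once the count exceeds tenacity.
    best, stale = -1.0, 0
    for epoch, acc in enumerate(dev_accuracies):
        if acc > best:
            best = acc
        else:
            if stale >= tenacity:
                return epoch, best   # stopped early
            stale += 1
    return len(dev_accuracies), best

# With tenacity=2 this stops at the fifth evaluation (index 4), keeping 0.74
# and never reaching the later 0.75:
print(early_stopping_demo([0.70, 0.74, 0.73, 0.73, 0.72, 0.75], tenacity=2))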
conditional_block
tools.py
# !/usr/bin/env python # -*- coding: utf-8 -*- """ Codes are borrowed from https://github.com/facebookresearch/SentEval/blob/master/senteval/tools/classifier.py and https://github.com/ganeshjawahar/interpret_bert with small modifications and our own implement of a simple self-attention layer Pytorch Classifier class in the style of scikit-learn Classifiers include Logistic Regression and MLP """ from __future__ import absolute_import, division, unicode_literals import os import sys root_path = '/'.join(os.path.realpath(__file__).split('/')[:-2]) if root_path not in sys.path: sys.path.append(root_path) import numpy as np import copy from senteval import utils import torch from torch import nn from torch.utils.data import TensorDataset, DataLoader, SequentialSampler, RandomSampler import torch.nn.functional as F from tqdm import tqdm class PyTorchClassifier(object): def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): # fix seed np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) self.inputdim = inputdim self.nclasses = nclasses self.l2reg = l2reg self.batch_size = batch_size self.cudaEfficient = cudaEfficient def
(self, args, features): all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) all_segment_ids = torch.tensor([f.input_type_ids for f in features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index, all_segment_ids, all_label_ids) eval_sampler = RandomSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.batch_size) return eval_dataloader, eval_sampler def fit(self, args, model, tokenizer, train_x, train_y, dev_x, dev_y, validation_split=None, early_stop=True): self.nepoch = 0 bestaccuracy = -1 stop_train = False early_stop_count = 0 # Preparing validation data train_dataloader, train_sampler = self.prepare_data(args, train_x) # Training while not stop_train and self.nepoch <= self.max_epoch: self.trainepoch(args, model, tokenizer, train_dataloader, epoch_size=self.epoch_size) accuracy = self.score(args, model, tokenizer, dev_x) if accuracy > bestaccuracy: bestaccuracy = accuracy bestmodel = copy.deepcopy(self.model) elif early_stop: if early_stop_count >= self.tenacity: stop_train = True early_stop_count += 1 self.model = bestmodel return bestaccuracy def trainepoch(self, args, model, tokenizer, train_dataloader, epoch_size=1, log_step = 50): all_costs = [] for _ in range(self.nepoch, self.nepoch + epoch_size): for step, batch in enumerate(train_dataloader): batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[3]} ybatch = batch[4] with torch.no_grad(): _, _, all_encoder_layers = model(**inputs) layer_output = all_encoder_layers[args.layer] self.model.train() output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor)) loss = self.loss_fn(output, ybatch) all_costs.append(loss.data.item()) # backward self.optimizer.zero_grad() loss.backward() # Update parameters self.optimizer.step() self.nepoch += epoch_size def score(self, args, model, tokenizer, dev_x): dev_dataloader, dev_sampler = self.prepare_data(args, dev_x) self.model.eval() correct = 0 all = 0 with torch.no_grad(): for step, batch in enumerate(dev_dataloader): batch = tuple(t.to(args.device) for t in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[3]} ybatch = batch[4] with torch.no_grad(): _, _, all_encoder_layers = model(**inputs) layer_output = all_encoder_layers[args.layer] output = self.model(layer_output, batch[1].type(torch.cuda.FloatTensor)) output_pred = output.cpu().data.tolist() pred = [] for p in output_pred: pred.append(0 if p[0] > p[1] else 1) yb = ybatch.data.tolist() for p, g in zip(pred, yb): all += 1 if p == g: correct += 1 accuracy = 1.0 * correct / all return accuracy def predict(self, devX): self.model.eval() devX = torch.FloatTensor(devX).cuda() yhat = np.array([]) with torch.no_grad(): for i in range(0, len(devX), self.batch_size): Xbatch = devX[i:i + self.batch_size] output = self.model(Xbatch) yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy()) yhat = np.vstack(yhat) return yhat def predict_proba(self, devX): self.model.eval() probas = [] with torch.no_grad(): for i in range(0, len(devX), self.batch_size): Xbatch = devX[i:i + self.batch_size] vals = F.softmax(self.model(Xbatch).data.cpu().numpy()) if not probas: probas = 
vals else: probas = np.concatenate(probas, vals, axis=0) return probas """ MLP with Pytorch (nhid=0 --> Logistic Regression) """ class MLP(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) -tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] if params["nhid"] == 0: self.model = nn.Sequential( nn.Linear(self.inputdim, self.nclasses), ).cuda() else: self.model = nn.Sequential( nn.Linear(self.inputdim, params["nhid"]), nn.Dropout(p=self.dropout), nn.Sigmoid(), nn.Linear(params["nhid"], self.nclasses), ).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn_mlp(PyTorchClassifier): def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111, cudaEfficient=False): super(self.__class__, self).__init__(inputdim, nclasses, l2reg, batch_size, seed, cudaEfficient) """ PARAMETERS: -nhid: number of hidden units (0: Logistic Regression) -optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..) -tenacity: how many times dev acc does not increase before stopping -epoch_size: each epoch corresponds to epoch_size pass on the train set -max_epoch: max number of epoches -dropout: dropout for MLP """ self.nhid = 0 if "nhid" not in params else params["nhid"] self.optim = "adam" if "optim" not in params else params["optim"] self.tenacity = 5 if "tenacity" not in params else params["tenacity"] self.epoch_size = 10 if "epoch_size" not in params else params["epoch_size"] self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"] self.dropout = 0. 
if "dropout" not in params else params["dropout"] self.batch_size = 64 if "batch_size" not in params else params["batch_size"] self.model = self_attn(self.inputdim, self.dropout, self.nhid, nclasses).cuda() self.loss_fn = nn.CrossEntropyLoss().cuda() self.loss_fn.size_average = False optim_fn, optim_params = utils.get_optimizer(self.optim) self.optimizer = optim_fn(self.model.parameters(), **optim_params) self.optimizer.param_groups[0]['weight_decay'] = self.l2reg class self_attn(nn.Module): def __init__(self, input_dim, dropout, nhid, nlabels): super(self.__class__, self).__init__() self._input_dim = input_dim self.Ws1 = nn.Linear(input_dim, input_dim // 2) self.Ws2 = nn.Linear(input_dim // 2, 1) self.dropout = dropout self.tanh = nn.Tanh() self.attn_dropout = nn.Dropout(p=self.dropout) self.attn_softmax = nn.Softmax(dim=1) self.cls = nn.Linear(input_dim, nlabels) def forward(self, Xbatch, mask): value = self.attn_dropout(self.Ws1(Xbatch)) attn_score = self.Ws2(self.tanh(value)) mask = (1.0 - mask.unsqueeze(2)) * -10000.0 attn_score = attn_score + mask attn_score = self.attn_softmax(attn_score) attn_output = torch.bmm(attn_score.transpose(1,2), Xbatch).squeeze(1) output = self.cls(attn_output) return output
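Note that predict_proba above would fail at runtime: F.softmax is applied to a NumPy array (and without an explicit dim), np.concatenate is given arrays as separate positional arguments instead of one sequence, and `if not probas:` is ambiguous once probas holds a multi-element array. A hedged corrected sketch for a model that takes a single tensor input, such as the MLP head (predict_proba_fixed is a hypothetical name):

import numpy as np
import torch
import torch.nn.functional as F

def predict_proba_fixed(model, devX, batch_size=64):
    """Batched class probabilities; devX is a 2-D float tensor."""
    model.eval()
    chunks = []
    with torch.no_grad():
        for i in range(0, len(devX), batch_size):
            xbatch = devX[i:i + batch_size]
            # softmax on the tensor, with an explicit dim, *then* move to NumPy
            chunks.append(F.softmax(model(xbatch), dim=1).cpu().numpy())
    # np.concatenate takes one sequence of arrays, not several positional arrays
    return np.concatenate(chunks, axis=0)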
prepare_data
identifier_name
google_drive_data.py
import csv import io import pickle import os import pip from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.http import MediaIoBaseDownload import cv2 import numpy as np SCOPES = ['https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive'] def install(package): if hasattr(pip, 'main'): pip.main(['install', package]) else: pip._internal.main(['install', package]) def create_folder(service): file_metadata = { 'name': 'Test Techm', 'mimeType': 'application/vnd.google-apps.folder' } file = service.files().create(body=file_metadata, fields='id').execute() print('Folder ID: %s' % file.get('id')) def get_gdrive_service(): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) # return Google Drive API service return build('drive', 'v3', credentials=creds) def downloadFile(id, name): service = get_gdrive_service() request = service.files().get_media(fileId=id) fh = io.BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() with io.open("." 
+ "/" + name, 'wb') as f: fh.seek(0) f.write(fh.read()) def is_duplicate(img1,img2): response=False image1 = cv2.imread(img1) image2 = cv2.imread(img2) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) #if difference is all zeros it will return False if result is True: response=True #duplicate_image.append(list[i]) #print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1))) except: i=0 return response def check_duplicate_image_new(items): print("Images is loading to memory..") #"""given items returned by Google Drive API, prints them in a tabular way""" map= {} list=[] message= set() duplicate_image=[] final_result={} if not items: print('No files found.') else: for item in items: if item["mimeType"] == "image/jpeg": list.append(item["name"]) #Creating Map value=[] value.append(item["name"]) value.append(item["webViewLink"]) if item["name"] in map: val=set() val.add(item["webViewLink"]) map[item["name"]]=item["webViewLink"] else: map[item["name"]]=item["webViewLink"] #Dowloading Image downloadFile(item["id"],item["name"]) match=[] flag=False for i in range(len(list)-1): temp=[] dp_count=0 flag=False if list[i] not in match : flag=True for j in range(i+1,len(list)): istrue=is_duplicate(list[i],list[j]) if istrue==True: dp_count=dp_count+1 temp.append(list[j]) if list[j] not in match: match.append(list[j]) if list[i] not in match: match.append(list[i]) if len(match)==0: match.append(list[i]) match.append(list[j]) if flag==True and dp_count !=0: #print(list[i]," - ",dp_count) final_result[list[i]]=temp m={} tdct=0 for x, y in final_result.items(): res=y tdct=tdct+len(res) s=set() for i in res: #s=set() for item in items: if item["mimeType"] == "image/jpeg": if item["name"]==i: s.add(item["webViewLink"]) m[x]=s return m,tdct def duplicate_image_list(imagelist): #print(len(imagelist)) dup_list = [] if len(imagelist) >= 1: for i in range(len(imagelist) - 1): count=0 l=[] for j in range(i + 1, len(imagelist)): image1 = cv2.imread(imagelist[i]) image2 = cv2.imread(imagelist[j]) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) # if difference is all zeros it will return False if result is True: #print(imagelist[i],"Matching with ",imagelist[j]) l.append(imagelist[j]) count=count+1 dup_list.append(imagelist[i]) except: i = 0 return dup_list csv_map = {} def check_duplicate_image(items): # """given items returned by Google Drive API, prints them in a tabular way""" map = {} image_name_list = [] duplicate_image = [] for item in items: file_type = item["mimeType"] if file_type == "image/jpeg": image_name_list.append(item["name"]) #append url or # Creating Map value = [] value.append(item["name"]) value.append(item["webViewLink"]) map[item["id"]] = value csv_map[item["name"]] = item["webViewLink"] # Dowloading Image downloadFile(item["id"], item["name"]) duplicate_image = duplicate_image_list(image_name_list) return duplicate_image def renameFile(service,items, newName):
def count_image(id): imageList = [] service = get_gdrive_service() results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute() items = results.get('files', []) for item in items: mime_Type = item["mimeType"] if mime_Type == "image/jpeg": imageList.append(item["name"]) if mime_Type == "application/vnd.google-apps.folder": imageList.extend(count_image(item["id"])) return imageList def list_files(items, service): folder_count = 0 image_count = 0 imglist = [] count = 0 testtechm_id = '' nm_name = [] img_count = [] list_all_folder_name=[] rows = [] overview_map = {} img_nm=0 for item in items: name = item["name"] mime_type = item["mimeType"] if name == 'Test Techm': testtechm_id = item['parents'][0] for item in items: id = item["id"] name = item["name"] mime_type = item["mimeType"] if mime_type == "application/vnd.google-apps.folder": folder_count = folder_count + 1 if mime_type == "image/jpeg": # renameFile(item["id"],"rajj_img"+str(image_count)) image_count = image_count + 1 if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id: list_all_folder_name.append(item["name"]) name1 = count_image(id) nm_name.append(name1) img_count.append(len(name1)) overview_map[item["name"]] = name1 rows.append((id, name, mime_type, folder_count)) imglist.append(count) rows.append((id, name, mime_type, folder_count)) #duplicate_count = len(check_duplicate_image(items)) lt,duplicate_ct=check_duplicate_image_new(items) duplicateImagehtml(folder_count, image_count, duplicate_ct,items) # overview chart report page draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count) def createDeviceCSV(): fileName = 'DuplicateImage.csv' with open(fileName, 'w') as csvFile: writer = csv.writer(csvFile) row = ["Image Name", 'Image Url'] writer.writerow(row) count = 0 for k, v in csv_map.items(): row = [k, v] writer.writerow(row) count = count + 1 #print("Device's adding into csv: " + str(count)) csvFile.close() #print('Device CSV File creation is Done file name is ', fileName) def duplicateImagehtml(folder_count, image_count, duplicate_ct,items): uri = [] map1,count=check_duplicate_image_new(items) for k, v in map1.items(): name_url = [] name_url.append(k) name_url.append(str(len(v))) name_url.append(str(v)) uri.append(name_url) fb = open('duplicateData.html', 'w') message = """ <html> <head> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['table']}); google.charts.setOnLoadCallback(drawTable); function drawTable() { var data3 = new google.visualization.DataTable(); data3.addColumn('string', 'Name'); data3.addColumn('string', 'Count'); data3.addRows([ ['Total Folders', '""" + str(folder_count) + """'], ['Total Images', '""" + str(image_count) + """'], ['Duplicate Images', '""" + str(duplicate_ct) + """']]); var table2 = new google.visualization.Table(document.getElementById('table_div_base')); table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'}); var data = new google.visualization.DataTable(); data.addColumn('string', 'Image Name'); data.addColumn('string', 'Image Count'); data.addColumn('string', 'Image Url'); data.addRows(""" + str(uri) + """); var table = new google.visualization.Table(document.getElementById('table_div')); table.draw(data, {showRowNumber: true, width: '100%', height: '100%'}); } </script> </head> <body><h2 style="text-align: center">Google Drive Summary Table</h2> <div 
id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div> <h2 style="text-align: center" >List of Duplicate Image</h2> <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div> </body></html>""" fb.write(message) fb.close() print("Duplicate image data preparing.. ") # webbrowser.open_new_tab('helloworld.html') def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,folder_count_real): #folder_count=len(folder_count) fb = open('gDriveOverview.html', 'w') values = list(map.values()) newlist = [] folder_name = list(map.keys()) total_image_count = [] duplicate_image_count_in_folder = [] for v in values: newlist.append(duplicate_image_list(v)) total_image_count.append(len(v)) for n in newlist: duplicate_image_count_in_folder.append(len(n)) # create plot #print(total_image_count, duplicate_image_count_in_folder, map.keys()) m1 = """<html> <head> <h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['bar','corechart','table']}); google.charts.setOnLoadCallback(drawChart); function drawChart() { var paiData = google.visualization.arrayToDataTable([ ['Drive', 'Drive Data'], ['Total Images', """ + str(image_count) + """], ['Total duplicate Images', """ + str(duplicate_ct) + """], ['Total Folder', """ + str(folder_count_real) + """] ]); var paiOptions = { title: 'Google Drive Overview' }; var chart = new google.visualization.PieChart(document.getElementById('piechart')); chart.draw(paiData, paiOptions); var barData = google.visualization.arrayToDataTable(""" fb.write(m1) barchart_data = [] barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images']) for i in range(len(values)): item_list = [] item_list.append(folder_count[i]) item_list.append(total_image_count[i]) item_list.append(duplicate_image_count_in_folder[i]) barchart_data.append(item_list) fb.write(m1) m3 = str(barchart_data) + """); var barOptions = { chart: { title: 'Google Drive Folderwise Overview', subtitle: 'This report is created on '+new Date(), }}; var chart = new google.charts.Bar(document.getElementById('bar_chart')); chart.draw(barData, google.charts.Bar.convertOptions(barOptions)); } </script> </head> <body> <div style="width:100%; margin:0px auto;"> <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div> <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div> </div> <div> <h2> <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p> </h2></div></body></html> """ fb.write(m3) fb.close() print("Bar and Pie chart creating.... ") def main(): service = get_gdrive_service() print("Wait a moment script is running ..!!!") results = service.files().list(pageSize=1000, fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute() items = results.get('files', []) if not items: # empty drive print('No files found.') else: # create_folder(service) print("-----_") name="g_image_" renameFile(service,items,name) print("==============================") #check_duplicate_image(items) # createDeviceCSV() list_files(items, service) if __name__ == '__main__': main() print("Script is done ..!!!")
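main requests nextPageToken in its fields string but never follows it, and count_image likewise issues a single files().list call with pageSize=1000, so drives holding more than 1000 entries are silently truncated. A sketch of the standard Drive v3 pagination loop (list_all_files is a hypothetical helper):

def list_all_files(service, query=None):
    # Follow nextPageToken until the listing is exhausted; a single call with
    # pageSize=1000 returns at most one page of results.
    files, page_token = [], None
    while True:
        response = service.files().list(
            q=query,
            pageSize=1000,
            fields="nextPageToken, files(id, name, mimeType, parents, webViewLink)",
            pageToken=page_token).execute()
        files.extend(response.get('files', []))
        page_token = response.get('nextPageToken')
        if page_token is None:
            return files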
    count = 1
    for item in items:
        id = item["id"]
        mime_type = item["mimeType"]
        file = service.files().get(fileId=id).execute()
        # The update body must not echo the read-only 'id' field.
        del file['id']
        if "jpeg" in mime_type:
            file['name'] = newName + str(count) + ".jpg"
        if "png" in mime_type:
            file['name'] = newName + str(count) + ".png"
        updated_file = service.files().update(fileId=id, body=file).execute()
        count = count + 1
identifier_body
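is_duplicate decodes and pixel-diffs every pair of downloaded images, which costs O(n^2) cv2.imread calls and hides failures behind a bare except. When byte-identical copies are what matters, hashing each file once gives the same grouping in a single pass; a standard-library sketch (group_exact_duplicates is a hypothetical name, and unlike the pixel diff it will not match files that merely decode to the same pixels):

import hashlib
from collections import defaultdict

def group_exact_duplicates(paths):
    # One SHA-256 digest per file replaces the pairwise pixel comparisons.
    groups = defaultdict(list)
    for path in paths:
        with open(path, 'rb') as f:
            groups[hashlib.sha256(f.read()).hexdigest()].append(path)
    return [group for group in groups.values() if len(group) > 1]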
google_drive_data.py
import csv import io import pickle import os import pip from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.http import MediaIoBaseDownload import cv2 import numpy as np SCOPES = ['https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive'] def install(package): if hasattr(pip, 'main'): pip.main(['install', package]) else: pip._internal.main(['install', package]) def create_folder(service): file_metadata = { 'name': 'Test Techm', 'mimeType': 'application/vnd.google-apps.folder' } file = service.files().create(body=file_metadata, fields='id').execute() print('Folder ID: %s' % file.get('id')) def get_gdrive_service(): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) # return Google Drive API service return build('drive', 'v3', credentials=creds) def downloadFile(id, name): service = get_gdrive_service() request = service.files().get_media(fileId=id) fh = io.BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() with io.open("." 
+ "/" + name, 'wb') as f: fh.seek(0) f.write(fh.read()) def is_duplicate(img1,img2): response=False image1 = cv2.imread(img1) image2 = cv2.imread(img2) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) #if difference is all zeros it will return False if result is True: response=True #duplicate_image.append(list[i]) #print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1))) except: i=0 return response def check_duplicate_image_new(items): print("Images is loading to memory..") #"""given items returned by Google Drive API, prints them in a tabular way""" map= {} list=[] message= set() duplicate_image=[] final_result={} if not items: print('No files found.') else: for item in items: if item["mimeType"] == "image/jpeg": list.append(item["name"]) #Creating Map value=[] value.append(item["name"]) value.append(item["webViewLink"]) if item["name"] in map: val=set() val.add(item["webViewLink"]) map[item["name"]]=item["webViewLink"] else: map[item["name"]]=item["webViewLink"] #Dowloading Image downloadFile(item["id"],item["name"]) match=[] flag=False for i in range(len(list)-1): temp=[] dp_count=0 flag=False if list[i] not in match : flag=True for j in range(i+1,len(list)): istrue=is_duplicate(list[i],list[j]) if istrue==True: dp_count=dp_count+1 temp.append(list[j]) if list[j] not in match: match.append(list[j]) if list[i] not in match: match.append(list[i]) if len(match)==0: match.append(list[i]) match.append(list[j]) if flag==True and dp_count !=0: #print(list[i]," - ",dp_count) final_result[list[i]]=temp m={} tdct=0 for x, y in final_result.items(): res=y tdct=tdct+len(res) s=set() for i in res: #s=set() for item in items: if item["mimeType"] == "image/jpeg": if item["name"]==i: s.add(item["webViewLink"]) m[x]=s return m,tdct def duplicate_image_list(imagelist): #print(len(imagelist)) dup_list = [] if len(imagelist) >= 1: for i in range(len(imagelist) - 1): count=0 l=[] for j in range(i + 1, len(imagelist)): image1 = cv2.imread(imagelist[i]) image2 = cv2.imread(imagelist[j]) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) # if difference is all zeros it will return False if result is True: #print(imagelist[i],"Matching with ",imagelist[j]) l.append(imagelist[j]) count=count+1 dup_list.append(imagelist[i]) except: i = 0 return dup_list csv_map = {} def check_duplicate_image(items): # """given items returned by Google Drive API, prints them in a tabular way""" map = {} image_name_list = [] duplicate_image = [] for item in items: file_type = item["mimeType"] if file_type == "image/jpeg": image_name_list.append(item["name"]) #append url or # Creating Map value = [] value.append(item["name"]) value.append(item["webViewLink"]) map[item["id"]] = value csv_map[item["name"]] = item["webViewLink"] # Dowloading Image downloadFile(item["id"], item["name"]) duplicate_image = duplicate_image_list(image_name_list) return duplicate_image def renameFile(service,items, newName): count=1 for item in items: id = item["id"] name = item["name"] mime_type = item["mimeType"] file = service.files().get(fileId=id).execute() del file['id'] if "jpeg" in mime_type: file['name'] = newName+str(count)+ ".jpg"; if "png" in mime_type: file['name'] = newName+str(count)+ ".png"; updated_file = service.files().update(fileId=id, body=file).execute() count=count+1 def count_image(id): imageList = [] service = get_gdrive_service() results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute() items = 
results.get('files', []) for item in items: mime_Type = item["mimeType"] if mime_Type == "image/jpeg": imageList.append(item["name"]) if mime_Type == "application/vnd.google-apps.folder": imageList.extend(count_image(item["id"])) return imageList def list_files(items, service): folder_count = 0 image_count = 0 imglist = [] count = 0 testtechm_id = '' nm_name = [] img_count = [] list_all_folder_name=[] rows = [] overview_map = {} img_nm=0 for item in items:
for item in items: id = item["id"] name = item["name"] mime_type = item["mimeType"] if mime_type == "application/vnd.google-apps.folder": folder_count = folder_count + 1 if mime_type == "image/jpeg": # renameFile(item["id"],"rajj_img"+str(image_count)) image_count = image_count + 1 if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id: list_all_folder_name.append(item["name"]) name1 = count_image(id) nm_name.append(name1) img_count.append(len(name1)) overview_map[item["name"]] = name1 rows.append((id, name, mime_type, folder_count)) imglist.append(count) rows.append((id, name, mime_type, folder_count)) #duplicate_count = len(check_duplicate_image(items)) lt,duplicate_ct=check_duplicate_image_new(items) duplicateImagehtml(folder_count, image_count, duplicate_ct,items) # overview chart report page draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count) def createDeviceCSV(): fileName = 'DuplicateImage.csv' with open(fileName, 'w') as csvFile: writer = csv.writer(csvFile) row = ["Image Name", 'Image Url'] writer.writerow(row) count = 0 for k, v in csv_map.items(): row = [k, v] writer.writerow(row) count = count + 1 #print("Device's adding into csv: " + str(count)) csvFile.close() #print('Device CSV File creation is Done file name is ', fileName) def duplicateImagehtml(folder_count, image_count, duplicate_ct,items): uri = [] map1,count=check_duplicate_image_new(items) for k, v in map1.items(): name_url = [] name_url.append(k) name_url.append(str(len(v))) name_url.append(str(v)) uri.append(name_url) fb = open('duplicateData.html', 'w') message = """ <html> <head> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['table']}); google.charts.setOnLoadCallback(drawTable); function drawTable() { var data3 = new google.visualization.DataTable(); data3.addColumn('string', 'Name'); data3.addColumn('string', 'Count'); data3.addRows([ ['Total Folders', '""" + str(folder_count) + """'], ['Total Images', '""" + str(image_count) + """'], ['Duplicate Images', '""" + str(duplicate_ct) + """']]); var table2 = new google.visualization.Table(document.getElementById('table_div_base')); table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'}); var data = new google.visualization.DataTable(); data.addColumn('string', 'Image Name'); data.addColumn('string', 'Image Count'); data.addColumn('string', 'Image Url'); data.addRows(""" + str(uri) + """); var table = new google.visualization.Table(document.getElementById('table_div')); table.draw(data, {showRowNumber: true, width: '100%', height: '100%'}); } </script> </head> <body><h2 style="text-align: center">Google Drive Summary Table</h2> <div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div> <h2 style="text-align: center" >List of Duplicate Image</h2> <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div> </body></html>""" fb.write(message) fb.close() print("Duplicate image data preparing.. 
") # webbrowser.open_new_tab('helloworld.html') def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,folder_count_real): #folder_count=len(folder_count) fb = open('gDriveOverview.html', 'w') values = list(map.values()) newlist = [] folder_name = list(map.keys()) total_image_count = [] duplicate_image_count_in_folder = [] for v in values: newlist.append(duplicate_image_list(v)) total_image_count.append(len(v)) for n in newlist: duplicate_image_count_in_folder.append(len(n)) # create plot #print(total_image_count, duplicate_image_count_in_folder, map.keys()) m1 = """<html> <head> <h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['bar','corechart','table']}); google.charts.setOnLoadCallback(drawChart); function drawChart() { var paiData = google.visualization.arrayToDataTable([ ['Drive', 'Drive Data'], ['Total Images', """ + str(image_count) + """], ['Total duplicate Images', """ + str(duplicate_ct) + """], ['Total Folder', """ + str(folder_count_real) + """] ]); var paiOptions = { title: 'Google Drive Overview' }; var chart = new google.visualization.PieChart(document.getElementById('piechart')); chart.draw(paiData, paiOptions); var barData = google.visualization.arrayToDataTable(""" fb.write(m1) barchart_data = [] barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images']) for i in range(len(values)): item_list = [] item_list.append(folder_count[i]) item_list.append(total_image_count[i]) item_list.append(duplicate_image_count_in_folder[i]) barchart_data.append(item_list) fb.write(m1) m3 = str(barchart_data) + """); var barOptions = { chart: { title: 'Google Drive Folderwise Overview', subtitle: 'This report is created on '+new Date(), }}; var chart = new google.charts.Bar(document.getElementById('bar_chart')); chart.draw(barData, google.charts.Bar.convertOptions(barOptions)); } </script> </head> <body> <div style="width:100%; margin:0px auto;"> <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div> <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div> </div> <div> <h2> <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p> </h2></div></body></html> """ fb.write(m3) fb.close() print("Bar and Pie chart creating.... ") def main(): service = get_gdrive_service() print("Wait a moment script is running ..!!!") results = service.files().list(pageSize=1000, fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute() items = results.get('files', []) if not items: # empty drive print('No files found.') else: # create_folder(service) print("-----_") name="g_image_" renameFile(service,items,name) print("==============================") #check_duplicate_image(items) # createDeviceCSV() list_files(items, service) if __name__ == '__main__': main() print("Script is done ..!!!")
        name = item["name"]
        mime_type = item["mimeType"]
        # First pass: remember the parent id of the top-level 'Test Techm' folder.
        if name == 'Test Techm':
            testtechm_id = item['parents'][0]
conditional_block
google_drive_data.py
import csv import io import pickle import os import pip from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.http import MediaIoBaseDownload import cv2 import numpy as np SCOPES = ['https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive'] def install(package): if hasattr(pip, 'main'): pip.main(['install', package]) else: pip._internal.main(['install', package]) def create_folder(service): file_metadata = { 'name': 'Test Techm', 'mimeType': 'application/vnd.google-apps.folder' } file = service.files().create(body=file_metadata, fields='id').execute() print('Folder ID: %s' % file.get('id')) def get_gdrive_service(): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) # return Google Drive API service return build('drive', 'v3', credentials=creds) def downloadFile(id, name): service = get_gdrive_service() request = service.files().get_media(fileId=id) fh = io.BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() with io.open("." 
+ "/" + name, 'wb') as f: fh.seek(0) f.write(fh.read()) def is_duplicate(img1,img2): response=False image1 = cv2.imread(img1) image2 = cv2.imread(img2) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) #if difference is all zeros it will return False if result is True: response=True #duplicate_image.append(list[i]) #print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1))) except: i=0 return response def check_duplicate_image_new(items): print("Images is loading to memory..") #"""given items returned by Google Drive API, prints them in a tabular way""" map= {} list=[] message= set() duplicate_image=[] final_result={} if not items: print('No files found.') else: for item in items: if item["mimeType"] == "image/jpeg": list.append(item["name"]) #Creating Map value=[] value.append(item["name"]) value.append(item["webViewLink"]) if item["name"] in map: val=set() val.add(item["webViewLink"]) map[item["name"]]=item["webViewLink"] else: map[item["name"]]=item["webViewLink"] #Dowloading Image downloadFile(item["id"],item["name"]) match=[] flag=False for i in range(len(list)-1): temp=[] dp_count=0 flag=False if list[i] not in match : flag=True for j in range(i+1,len(list)): istrue=is_duplicate(list[i],list[j]) if istrue==True: dp_count=dp_count+1 temp.append(list[j]) if list[j] not in match: match.append(list[j]) if list[i] not in match: match.append(list[i]) if len(match)==0: match.append(list[i]) match.append(list[j]) if flag==True and dp_count !=0: #print(list[i]," - ",dp_count) final_result[list[i]]=temp m={} tdct=0 for x, y in final_result.items(): res=y tdct=tdct+len(res) s=set() for i in res: #s=set() for item in items: if item["mimeType"] == "image/jpeg": if item["name"]==i: s.add(item["webViewLink"]) m[x]=s return m,tdct
def duplicate_image_list(imagelist): #print(len(imagelist)) dup_list = [] if len(imagelist) >= 1: for i in range(len(imagelist) - 1): count=0 l=[] for j in range(i + 1, len(imagelist)): image1 = cv2.imread(imagelist[i]) image2 = cv2.imread(imagelist[j]) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) # if difference is all zeros it will return False if result is True: #print(imagelist[i],"Matching with ",imagelist[j]) l.append(imagelist[j]) count=count+1 dup_list.append(imagelist[i]) except: i = 0 return dup_list csv_map = {} def check_duplicate_image(items): # """given items returned by Google Drive API, prints them in a tabular way""" map = {} image_name_list = [] duplicate_image = [] for item in items: file_type = item["mimeType"] if file_type == "image/jpeg": image_name_list.append(item["name"]) #append url or # Creating Map value = [] value.append(item["name"]) value.append(item["webViewLink"]) map[item["id"]] = value csv_map[item["name"]] = item["webViewLink"] # Dowloading Image downloadFile(item["id"], item["name"]) duplicate_image = duplicate_image_list(image_name_list) return duplicate_image def renameFile(service,items, newName): count=1 for item in items: id = item["id"] name = item["name"] mime_type = item["mimeType"] file = service.files().get(fileId=id).execute() del file['id'] if "jpeg" in mime_type: file['name'] = newName+str(count)+ ".jpg"; if "png" in mime_type: file['name'] = newName+str(count)+ ".png"; updated_file = service.files().update(fileId=id, body=file).execute() count=count+1 def count_image(id): imageList = [] service = get_gdrive_service() results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute() items = results.get('files', []) for item in items: mime_Type = item["mimeType"] if mime_Type == "image/jpeg": imageList.append(item["name"]) if mime_Type == "application/vnd.google-apps.folder": imageList.extend(count_image(item["id"])) return imageList def list_files(items, service): folder_count = 0 image_count = 0 imglist = [] count = 0 testtechm_id = '' nm_name = [] img_count = [] list_all_folder_name=[] rows = [] overview_map = {} img_nm=0 for item in items: name = item["name"] mime_type = item["mimeType"] if name == 'Test Techm': testtechm_id = item['parents'][0] for item in items: id = item["id"] name = item["name"] mime_type = item["mimeType"] if mime_type == "application/vnd.google-apps.folder": folder_count = folder_count + 1 if mime_type == "image/jpeg": # renameFile(item["id"],"rajj_img"+str(image_count)) image_count = image_count + 1 if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id: list_all_folder_name.append(item["name"]) name1 = count_image(id) nm_name.append(name1) img_count.append(len(name1)) overview_map[item["name"]] = name1 rows.append((id, name, mime_type, folder_count)) imglist.append(count) rows.append((id, name, mime_type, folder_count)) #duplicate_count = len(check_duplicate_image(items)) lt,duplicate_ct=check_duplicate_image_new(items) duplicateImagehtml(folder_count, image_count, duplicate_ct,items) # overview chart report page draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count) def createDeviceCSV(): fileName = 'DuplicateImage.csv' with open(fileName, 'w') as csvFile: writer = csv.writer(csvFile) row = ["Image Name", 'Image Url'] writer.writerow(row) count = 0 for k, v in csv_map.items(): row = [k, v] writer.writerow(row) count = count + 1 #print("Device's adding into csv: " + 
str(count)) csvFile.close() #print('Device CSV File creation is Done file name is ', fileName) def duplicateImagehtml(folder_count, image_count, duplicate_ct,items): uri = [] map1,count=check_duplicate_image_new(items) for k, v in map1.items(): name_url = [] name_url.append(k) name_url.append(str(len(v))) name_url.append(str(v)) uri.append(name_url) fb = open('duplicateData.html', 'w') message = """ <html> <head> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['table']}); google.charts.setOnLoadCallback(drawTable); function drawTable() { var data3 = new google.visualization.DataTable(); data3.addColumn('string', 'Name'); data3.addColumn('string', 'Count'); data3.addRows([ ['Total Folders', '""" + str(folder_count) + """'], ['Total Images', '""" + str(image_count) + """'], ['Duplicate Images', '""" + str(duplicate_ct) + """']]); var table2 = new google.visualization.Table(document.getElementById('table_div_base')); table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'}); var data = new google.visualization.DataTable(); data.addColumn('string', 'Image Name'); data.addColumn('string', 'Image Count'); data.addColumn('string', 'Image Url'); data.addRows(""" + str(uri) + """); var table = new google.visualization.Table(document.getElementById('table_div')); table.draw(data, {showRowNumber: true, width: '100%', height: '100%'}); } </script> </head> <body><h2 style="text-align: center">Google Drive Summary Table</h2> <div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div> <h2 style="text-align: center" >List of Duplicate Image</h2> <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div> </body></html>""" fb.write(message) fb.close() print("Duplicate image data preparing.. 
") # webbrowser.open_new_tab('helloworld.html') def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,folder_count_real): #folder_count=len(folder_count) fb = open('gDriveOverview.html', 'w') values = list(map.values()) newlist = [] folder_name = list(map.keys()) total_image_count = [] duplicate_image_count_in_folder = [] for v in values: newlist.append(duplicate_image_list(v)) total_image_count.append(len(v)) for n in newlist: duplicate_image_count_in_folder.append(len(n)) # create plot #print(total_image_count, duplicate_image_count_in_folder, map.keys()) m1 = """<html> <head> <h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1> <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script> <script type="text/javascript"> google.charts.load('current', {'packages':['bar','corechart','table']}); google.charts.setOnLoadCallback(drawChart); function drawChart() { var paiData = google.visualization.arrayToDataTable([ ['Drive', 'Drive Data'], ['Total Images', """ + str(image_count) + """], ['Total duplicate Images', """ + str(duplicate_ct) + """], ['Total Folder', """ + str(folder_count_real) + """] ]); var paiOptions = { title: 'Google Drive Overview' }; var chart = new google.visualization.PieChart(document.getElementById('piechart')); chart.draw(paiData, paiOptions); var barData = google.visualization.arrayToDataTable(""" fb.write(m1) barchart_data = [] barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images']) for i in range(len(values)): item_list = [] item_list.append(folder_count[i]) item_list.append(total_image_count[i]) item_list.append(duplicate_image_count_in_folder[i]) barchart_data.append(item_list) fb.write(m1) m3 = str(barchart_data) + """); var barOptions = { chart: { title: 'Google Drive Folderwise Overview', subtitle: 'This report is created on '+new Date(), }}; var chart = new google.charts.Bar(document.getElementById('bar_chart')); chart.draw(barData, google.charts.Bar.convertOptions(barOptions)); } </script> </head> <body> <div style="width:100%; margin:0px auto;"> <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div> <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div> </div> <div> <h2> <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p> </h2></div></body></html> """ fb.write(m3) fb.close() print("Bar and Pie chart creating.... ") def main(): service = get_gdrive_service() print("Wait a moment script is running ..!!!") results = service.files().list(pageSize=1000, fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute() items = results.get('files', []) if not items: # empty drive print('No files found.') else: # create_folder(service) print("-----_") name="g_image_" renameFile(service,items,name) print("==============================") #check_duplicate_image(items) # createDeviceCSV() list_files(items, service) if __name__ == '__main__': main() print("Script is done ..!!!")
random_line_split
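draw_chart_create_report above calls fb.write(m1) twice, once before and once after assembling barchart_data, so gDriveOverview.html ends up with the header and chart boilerplate duplicated; the file handle is also closed by hand. A hedged sketch of the intended write sequence (write_overview_report and its parameters are hypothetical):

def write_overview_report(path, header_html, barchart_data, tail_html):
    # Write the header exactly once, then the serialized chart rows and the
    # closing markup; the with-block closes the file even if a write fails.
    with open(path, 'w') as fb:
        fb.write(header_html)
        fb.write(str(barchart_data))
        fb.write(tail_html)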
google_drive_data.py
import csv import io import pickle import os import pip from googleapiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from google.auth.transport.requests import Request from googleapiclient.http import MediaIoBaseDownload import cv2 import numpy as np SCOPES = ['https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive'] def install(package): if hasattr(pip, 'main'): pip.main(['install', package]) else: pip._internal.main(['install', package]) def
(service): file_metadata = { 'name': 'Test Techm', 'mimeType': 'application/vnd.google-apps.folder' } file = service.files().create(body=file_metadata, fields='id').execute() print('Folder ID: %s' % file.get('id')) def get_gdrive_service(): creds = None # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists('token.pickle'): with open('token.pickle', 'rb') as token: creds = pickle.load(token) # If there are no (valid) credentials available, let the user log in. if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( 'credentials.json', SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open('token.pickle', 'wb') as token: pickle.dump(creds, token) # return Google Drive API service return build('drive', 'v3', credentials=creds) def downloadFile(id, name): service = get_gdrive_service() request = service.files().get_media(fileId=id) fh = io.BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() with io.open("." + "/" + name, 'wb') as f: fh.seek(0) f.write(fh.read()) def is_duplicate(img1,img2): response=False image1 = cv2.imread(img1) image2 = cv2.imread(img2) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) #if difference is all zeros it will return False if result is True: response=True #duplicate_image.append(list[i]) #print("{} images is matching with {} Occurred {} times ".format(img1,img1,list.count(img1))) except: i=0 return response def check_duplicate_image_new(items): print("Images is loading to memory..") #"""given items returned by Google Drive API, prints them in a tabular way""" map= {} list=[] message= set() duplicate_image=[] final_result={} if not items: print('No files found.') else: for item in items: if item["mimeType"] == "image/jpeg": list.append(item["name"]) #Creating Map value=[] value.append(item["name"]) value.append(item["webViewLink"]) if item["name"] in map: val=set() val.add(item["webViewLink"]) map[item["name"]]=item["webViewLink"] else: map[item["name"]]=item["webViewLink"] #Dowloading Image downloadFile(item["id"],item["name"]) match=[] flag=False for i in range(len(list)-1): temp=[] dp_count=0 flag=False if list[i] not in match : flag=True for j in range(i+1,len(list)): istrue=is_duplicate(list[i],list[j]) if istrue==True: dp_count=dp_count+1 temp.append(list[j]) if list[j] not in match: match.append(list[j]) if list[i] not in match: match.append(list[i]) if len(match)==0: match.append(list[i]) match.append(list[j]) if flag==True and dp_count !=0: #print(list[i]," - ",dp_count) final_result[list[i]]=temp m={} tdct=0 for x, y in final_result.items(): res=y tdct=tdct+len(res) s=set() for i in res: #s=set() for item in items: if item["mimeType"] == "image/jpeg": if item["name"]==i: s.add(item["webViewLink"]) m[x]=s return m,tdct def duplicate_image_list(imagelist): #print(len(imagelist)) dup_list = [] if len(imagelist) >= 1: for i in range(len(imagelist) - 1): count=0 l=[] for j in range(i + 1, len(imagelist)): image1 = cv2.imread(imagelist[i]) image2 = cv2.imread(imagelist[j]) try: difference = cv2.subtract(image1, image2) result = not np.any(difference) # if difference is all zeros it will return False if result is True: #print(imagelist[i],"Matching with ",imagelist[j]) 
                        l.append(imagelist[j])
                        count = count + 1
                        dup_list.append(imagelist[i])
                except Exception:
                    # Skip pairs that cannot be compared
                    pass
    return dup_list

csv_map = {}

def check_duplicate_image(items):
    """Given items returned by the Google Drive API, download every JPEG and
    return the list of duplicate image names."""
    id_to_value = {}
    image_name_list = []
    for item in items:
        if item["mimeType"] == "image/jpeg":
            image_name_list.append(item["name"])
            # Keep name and URL together for the CSV report
            id_to_value[item["id"]] = [item["name"], item["webViewLink"]]
            csv_map[item["name"]] = item["webViewLink"]
            # Downloading image
            downloadFile(item["id"], item["name"])
    return duplicate_image_list(image_name_list)

def renameFile(service, items, newName):
    count = 1
    for item in items:
        id = item["id"]
        mime_type = item["mimeType"]
        file = service.files().get(fileId=id).execute()
        del file['id']
        if "jpeg" in mime_type:
            file['name'] = newName + str(count) + ".jpg"
        if "png" in mime_type:
            file['name'] = newName + str(count) + ".png"
        service.files().update(fileId=id, body=file).execute()
        count = count + 1

def count_image(id):
    """Recursively collects the names of all JPEGs under the given folder id."""
    imageList = []
    service = get_gdrive_service()
    results = service.files().list(pageSize=1000, q="'{}' in parents".format(id)).execute()
    items = results.get('files', [])
    for item in items:
        mime_type = item["mimeType"]
        if mime_type == "image/jpeg":
            imageList.append(item["name"])
        if mime_type == "application/vnd.google-apps.folder":
            imageList.extend(count_image(item["id"]))
    return imageList

def list_files(items, service):
    folder_count = 0
    image_count = 0
    testtechm_id = ''
    img_count = []
    list_all_folder_name = []
    rows = []
    overview_map = {}
    for item in items:
        if item["name"] == 'Test Techm':
            testtechm_id = item['parents'][0]
    for item in items:
        id = item["id"]
        name = item["name"]
        mime_type = item["mimeType"]
        if mime_type == "application/vnd.google-apps.folder":
            folder_count = folder_count + 1
        if mime_type == "image/jpeg":
            image_count = image_count + 1
        if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
            list_all_folder_name.append(name)
            folder_images = count_image(id)
            img_count.append(len(folder_images))
            overview_map[name] = folder_images
        rows.append((id, name, mime_type, folder_count))
    lt, duplicate_ct = check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct, items)
    # Overview chart report page
    draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map, folder_count)

def createDeviceCSV():
    fileName = 'DuplicateImage.csv'
    with open(fileName, 'w') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(["Image Name", 'Image Url'])
        for k, v in csv_map.items():
            writer.writerow([k, v])

def duplicateImagehtml(folder_count, image_count, duplicate_ct, items):
    uri = []
    map1, count = check_duplicate_image_new(items)
    for k, v in map1.items():
        uri.append([k, str(len(v)), str(v)])
    fb = open('duplicateData.html', 'w')
    message = """
    <html>
    <head>
    <script type="text/javascript"
src="https://www.gstatic.com/charts/loader.js"></script>
    <script type="text/javascript">
      google.charts.load('current', {'packages':['table']});
      google.charts.setOnLoadCallback(drawTable);
      function drawTable() {
        var data3 = new google.visualization.DataTable();
        data3.addColumn('string', 'Name');
        data3.addColumn('string', 'Count');
        data3.addRows([
          ['Total Folders', '""" + str(folder_count) + """'],
          ['Total Images', '""" + str(image_count) + """'],
          ['Duplicate Images', '""" + str(duplicate_ct) + """']]);
        var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
        table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
        var data = new google.visualization.DataTable();
        data.addColumn('string', 'Image Name');
        data.addColumn('string', 'Image Count');
        data.addColumn('string', 'Image Url');
        data.addRows(""" + str(uri) + """);
        var table = new google.visualization.Table(document.getElementById('table_div'));
        table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
      }
    </script>
    </head>
    <body><h2 style="text-align: center">Google Drive Summary Table</h2>
    <div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
    <h2 style="text-align: center">List of Duplicate Images</h2>
    <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
    </body></html>"""
    fb.write(message)
    fb.close()
    print("Duplicate image data preparing.. ")

def draw_chart_create_report(folder_names, image_count, duplicate_ct, folder_map, folder_count_real):
    fb = open('gDriveOverview.html', 'w')
    values = list(folder_map.values())
    newlist = []
    total_image_count = []
    duplicate_image_count_in_folder = []
    for v in values:
        newlist.append(duplicate_image_list(v))
        total_image_count.append(len(v))
    for n in newlist:
        duplicate_image_count_in_folder.append(len(n))
    m1 = """<html>
    <head>
    <h1 style="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
    <script type="text/javascript">
      google.charts.load('current', {'packages':['bar','corechart','table']});
      google.charts.setOnLoadCallback(drawChart);
      function drawChart() {
        var paiData = google.visualization.arrayToDataTable([
          ['Drive', 'Drive Data'],
          ['Total Images', """ + str(image_count) + """],
          ['Total duplicate Images', """ + str(duplicate_ct) + """],
          ['Total Folder', """ + str(folder_count_real) + """]
        ]);
        var paiOptions = { title: 'Google Drive Overview' };
        var chart = new google.visualization.PieChart(document.getElementById('piechart'));
        chart.draw(paiData, paiOptions);
        var barData = google.visualization.arrayToDataTable("""
    # m1 is written exactly once; the bar chart rows are appended via m3 below
    fb.write(m1)
    barchart_data = []
    barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images'])
    for i in range(len(values)):
        barchart_data.append([folder_names[i], total_image_count[i], duplicate_image_count_in_folder[i]])
    m3 = str(barchart_data) + """);
        var barOptions = {
          chart: {
            title: 'Google Drive Folderwise Overview',
            subtitle: 'This report is created on '+new Date(),
          }};
        var chart = new google.charts.Bar(document.getElementById('bar_chart'));
        chart.draw(barData,
google.charts.Bar.convertOptions(barOptions));
      }
    </script>
    </head>
    <body>
    <div style="width:100%; margin:0px auto;">
      <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
      <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
    </div>
    <div><h2>
      <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
    </h2></div></body></html>
    """
    fb.write(m3)
    fb.close()
    print("Bar and Pie chart creating.... ")

def main():
    service = get_gdrive_service()
    print("Wait a moment, the script is running ..!!!")
    results = service.files().list(
        pageSize=1000,
        fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)").execute()
    items = results.get('files', [])
    if not items:
        # Empty drive
        print('No files found.')
    else:
        # create_folder(service)
        name = "g_image_"
        renameFile(service, items, name)
        print("==============================")
        # check_duplicate_image(items)
        # createDeviceCSV()
        list_files(items, service)

if __name__ == '__main__':
    main()
    print("Script is done ..!!!")
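# Side note (not part of the original script): the pairwise cv2.subtract
# comparison above only catches pixel-identical files and costs O(n^2)
# image reads. A minimal sketch of a faster exact-duplicate check using
# content hashes; hashlib/collections are standard library, and
# find_duplicates_by_hash is a hypothetical helper name, not something
# the script above defines.
import hashlib
from collections import defaultdict

def find_duplicates_by_hash(paths):
    """Group files whose bytes share a SHA-256 digest.

    One hashing pass per file instead of O(n^2) image comparisons, but it
    only detects exact byte-level duplicates, not re-encoded copies.
    """
    buckets = defaultdict(list)
    for path in paths:
        with open(path, 'rb') as f:
            buckets[hashlib.sha256(f.read()).hexdigest()].append(path)
    # Keep only digests shared by two or more files
    return {d: ps for d, ps in buckets.items() if len(ps) > 1}

# Example: find_duplicates_by_hash(image_names) after the downloads complete.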
create_folder
identifier_name
mod.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.

// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

//! # Bridge Module
//!
//! This will eventually have some useful documentation.
//! For now though, enjoy this cow's wisdom.
//!
//!```ignore
//! ________________________________________
//! / You are only young once, but you can  \
//! \ stay immature indefinitely.           /
//! ----------------------------------------
//!        \   ^__^
//!         \  (oo)\_______
//!            (__)\       )\/\
//!                ||----w |
//!                ||     ||
//!```
//
// Ensure we're `no_std` when compiling for Wasm.
// #![cfg_attr(not(feature = "std"), no_std)]

mod error;
mod justification;
pub mod storage_proof;
mod types;

use im::OrdMap as BTreeMap;
use std::fmt;

use anyhow::Result;
use error::JustificationError;
use justification::GrandpaJustification;
use log::{error, info};
use phala_serde_more as more;
use serde::{Deserialize, Serialize};
use storage_proof::{StorageProof, StorageProofChecker};

use finality_grandpa::voter_set::VoterSet;
use num::AsPrimitive;
use parity_scale_codec::{Decode, Encode};
use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId};
use sp_core::H256;
use sp_runtime::traits::{Block as BlockT, Header, NumberFor};
use sp_runtime::EncodedJustification;

pub use types::{AuthoritySet, AuthoritySetChange};

#[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)]
pub struct BridgeInfo<T: Config> {
    #[serde(bound(
        serialize = "T::Header: ::serde::Serialize",
        deserialize = "T::Header: ::serde::de::DeserializeOwned"
    ))]
    last_finalized_block_header: T::Header,
    #[serde(with = "more::scale_bytes")]
    current_set: AuthoritySet,
}

impl<T: Config> BridgeInfo<T> {
    pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self {
        BridgeInfo {
            last_finalized_block_header: block_header,
            current_set: validator_set,
        }
    }
}

type BridgeId = u64;

pub trait Config: frame_system::Config<Hash = H256> {
    type Block: BlockT<Hash = H256, Header = Self::Header>;
}

impl Config for chain::Runtime {
    type Block = chain::Block;
}

#[derive(Encode, Decode, Clone, Serialize, Deserialize)]
pub struct LightValidation<T: Config> {
    num_bridges: BridgeId,
    #[serde(bound(
        serialize = "T::Header: ::serde::Serialize",
        deserialize = "T::Header: ::serde::de::DeserializeOwned"
    ))]
    tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>,
}

impl<T: Config> LightValidation<T>
where
    NumberFor<T::Block>: AsPrimitive<usize>,
{
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        LightValidation {
            num_bridges: 0,
            tracked_bridges: BTreeMap::new(),
        }
    }

    pub fn initialize_bridge(
        &mut self,
        block_header: T::Header,
        validator_set: AuthoritySet,
        proof: StorageProof,
    ) -> Result<BridgeId> {
        let state_root = block_header.state_root();

        Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id)
            .map_err(anyhow::Error::msg)?;

        let bridge_info = BridgeInfo::new(block_header, validator_set);

        let new_bridge_id = self.num_bridges + 1;
        self.tracked_bridges.insert(new_bridge_id, bridge_info);
        self.num_bridges = new_bridge_id;

        Ok(new_bridge_id)
    }

    /// Submits a sequence of block headers to the light client to validate
    ///
    /// The light client accepts a sequence of block headers, optionally with an authority set change
    /// in the last block. Without the authority set change, it assumes the authority set and the set
    /// id remain the same after submitting the blocks. One submission can have at most one authority
    /// set change (change.set_id == last_set_id + 1).
    pub fn submit_finalized_headers(
        &mut self,
        bridge_id: BridgeId,
        header: T::Header,
        ancestry_proof: Vec<T::Header>,
        grandpa_proof: EncodedJustification,
        authority_set_change: Option<AuthoritySetChange>,
    ) -> Result<()> {
        let bridge = self
            .tracked_bridges
            .get(&bridge_id)
            .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?;

        // Check that the new header is a descendant of the old header
        let last_header = &bridge.last_finalized_block_header;
        verify_ancestry(ancestry_proof, last_header.hash(), &header)?;

        let block_hash = header.hash();
        let block_num = *header.number();

        // Check that the header has been finalized
        let voters = &bridge.current_set;
        let voter_set = VoterSet::new(voters.list.clone()).unwrap();
        let voter_set_id = voters.id;
        verify_grandpa_proof::<T::Block>(
            grandpa_proof,
            block_hash,
            block_num,
            voter_set_id,
            &voter_set,
        )?;

        match self.tracked_bridges.get_mut(&bridge_id) {
            Some(bridge_info) => {
                bridge_info.last_finalized_block_header = header;
                if let Some(change) = authority_set_change {
                    // Check the validator set increment
                    if change.authority_set.id != voter_set_id + 1 {
                        return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId));
                    }
                    // Check validator set change proof
                    let state_root = bridge_info.last_finalized_block_header.state_root();
                    Self::check_validator_set_proof(
                        state_root,
                        change.authority_proof,
                        &change.authority_set.list,
                        change.authority_set.id,
                    )?;
                    // Commit
                    bridge_info.current_set = AuthoritySet {
                        list: change.authority_set.list,
                        id: change.authority_set.id,
                    }
                }
            }
            _ => panic!("We successfully got this bridge earlier, therefore it exists; qed"),
        };

        Ok(())
    }

    pub fn validate_storage_proof(
        &self,
        state_root: T::Hash,
        items: &[(&[u8], &[u8])], // &[(key, value)]
    ) -> Result<()> {
        let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?;
        for (k, v) in items {
            let actual_value = checker
                .read_value(k)?
                .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;
            if actual_value.as_slice() != *v {
                return Err(anyhow::Error::msg(Error::StorageValueMismatch));
            }
        }
        Ok(())
    }
}

#[derive(Debug)]
pub enum Error {
    // InvalidStorageProof,
    // StorageRootMismatch,
    StorageValueUnavailable,
    // InvalidValidatorSetProof,
    ValidatorSetMismatch,
    InvalidAncestryProof,
    NoSuchBridgeExists,
    InvalidFinalityProof,
    // UnknownClientError,
    // HeaderAncestryMismatch,
    UnexpectedValidatorSetId,
    StorageValueMismatch,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Error::StorageRootMismatch => write!(f, "storage root mismatch"),
            Error::StorageValueUnavailable => write!(f, "storage value unavailable"),
            Error::ValidatorSetMismatch => write!(f, "validator set mismatch"),
            Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"),
            Error::NoSuchBridgeExists => write!(f, "no such bridge exists"),
            Error::InvalidFinalityProof => write!(f, "invalid finality proof"),
            // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"),
            Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"),
            Error::StorageValueMismatch => write!(f, "storage value mismatch"),
        }
    }
}

impl From<JustificationError> for Error {
    fn from(e: JustificationError) -> Self {
        match e {
            JustificationError::BadJustification(msg) => {
                error!("InvalidFinalityProof(BadJustification({}))", msg);
                Error::InvalidFinalityProof
            }
            JustificationError::JustificationDecode => {
                error!("InvalidFinalityProof(JustificationDecode)");
                Error::InvalidFinalityProof
            }
        }
    }
}

impl<T: Config> LightValidation<T>
where
    NumberFor<T::Block>: AsPrimitive<usize>,
{
    fn check_validator_set_proof(
        state_root: &T::Hash,
        proof: StorageProof,
        validator_set: &[(AuthorityId, AuthorityWeight)],
        _set_id: SetId,
    ) -> Result<()> {
        let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?;

        // By encoding the given set we should have an easy way to compare
        // with the stuff we get out of storage via `read_value`
        let mut encoded_validator_set = validator_set.encode();
        encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERSION == 1
        let actual_validator_set = checker
            .read_value(b":grandpa_authorities")?
            .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?;

        // TODO: check set_id
        // checker.read_value(grandpa::CurrentSetId.key())

        if encoded_validator_set == actual_validator_set {
            Ok(())
        } else {
            Err(anyhow::Error::msg(Error::ValidatorSetMismatch))
        }
    }
}

// A naive way to check whether a `child` header is a descendant
// of an `ancestor` header. For this it requires a proof which
// is a chain of headers between (but not including) the `child`
// and `ancestor`. This could be updated to use something like
// Log2 Ancestors (#2053) in the future.
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()>
where
    H: Header<Hash = H256>,
{
    {
        info!("ancestor_hash: {}", ancestor_hash);
        for h in proof.iter() {
            info!(
                "block {:?} - hash: {} parent: {}",
                h.number(),
                h.hash(),
                h.parent_hash()
            );
        }
        info!(
            "child block {:?} - hash: {} parent: {}",
            child.number(),
            child.hash(),
            child.parent_hash()
        );
    }

    // If the child's parent hash matches our ancestor's hash we're done
    let mut parent_hash = child.parent_hash();
    if *parent_hash == ancestor_hash {
        return Ok(());
    }

    // Otherwise walk the proof: each header must be the parent of the previous one
    for header in proof.iter() {
        // Need to check that blocks are actually related
        if header.hash() != *parent_hash {
            break;
        }

        parent_hash = header.parent_hash();
        if *parent_hash == ancestor_hash {
            return Ok(());
        }
    }

    Err(anyhow::Error::msg(Error::InvalidAncestryProof))
}

fn verify_grandpa_proof<B>(
    justification: EncodedJustification,
    hash: B::Hash,
    number: NumberFor<B>,
    set_id: u64,
    voters: &VoterSet<AuthorityId>,
) -> Result<()>
where
    B: BlockT<Hash = H256>,
    NumberFor<B>: finality_grandpa::BlockNumberOps,
{
    // We don't really care about the justification, as long as it's valid
    let _ = GrandpaJustification::<B>::decode_and_verify_finalizes(
        &justification,
        (hash, number),
        set_id,
        voters,
    )
    .map_err(anyhow::Error::msg)?;

    Ok(())
}

impl<T: Config> fmt::Debug for LightValidation<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "LightValidation {{ num_bridges: {}, tracked_bridges: {:?} }}",
            self.num_bridges, self.tracked_bridges
        )
    }
}

impl<T: Config> fmt::Debug for BridgeInfo<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}",
            self.last_finalized_block_header, self.current_set.list, self.current_set.id)
    }
}

pub mod utils {
    use parity_scale_codec::Encode;

    /// Gets the prefix of a storage item
    pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> {
        let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec();
        bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]);
        bytes
    }

    /// Calculates the Substrate storage key prefix for a StorageMap entry
    pub fn storage_map_prefix_twox_64_concat(
        module: &[u8],
        storage_item: &[u8],
        key: &(impl Encode + ?Sized),
    ) -> Vec<u8> {
        let mut bytes = sp_core::twox_128(module).to_vec();
        bytes.extend(&sp_core::twox_128(storage_item)[..]);
        let encoded = key.encode();
        bytes.extend(sp_core::twox_64(&encoded));
        bytes.extend(&encoded);
        bytes
    }

    #[test]
    #[ignore = "for debug"]
    fn show_keys() {
        let modules = [
            "System",
            "Timestamp",
            "RandomnessCollectiveFlip",
            "Utility",
            "Multisig",
            "Proxy",
            "Vesting",
            "Scheduler",
            "Preimage",
            "ParachainInfo",
            "ParachainSystem",
            "XcmpQueue",
            "CumulusXcm",
            "DmpQueue",
            "PolkadotXcm",
            "Balances",
            "TransactionPayment",
            "Authorship",
            "CollatorSelection",
            "Session",
            "Aura",
            "AuraExt",
            "Identity",
            "Democracy",
            "Council",
            "Treasury",
            "Bounties",
            "Lottery",
            "TechnicalCommittee",
            "TechnicalMembership",
            "PhragmenElection",
            "Tips",
            "ChildBounties",
            "ChainBridge",
            "XcmBridge",
            "XTransfer",
            "PhalaMq",
            "PhalaRegistry",
            "PhalaComputation",
            "PhalaStakePool",
            "Assets",
            "AssetsRegistry",
            "PhalaStakePoolv2",
            "PhalaVault",
            "PhalaWrappedBalances",
            "PhalaBasePool",
            "Uniques",
            "RmrkCore",
            "RmrkEquip",
            "RmrkMarket",
            "PWNftSale",
            "PWIncubation",
        ];
        for module in modules.iter() {
            let key = storage_prefix(module, "");
            println!("{module}: 0x{}", hex::encode(key));
        }
        let storage_keys = [
            "Collections",
            "Nfts",
            "Priorities",
            "Children",
            "Resources",
            "EquippableBases",
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation: 0xb71c310d8c830d345ee1c1b84566a8d199e9d85137db46ef4bbea33613baafd5 PhalaStakePool: 0x9708ddcf89326bf4f4428dd135287d5199e9d85137db46ef4bbea33613baafd5 Assets: 0x682a59d51ab9e48a8c8cc418ff9708d299e9d85137db46ef4bbea33613baafd5 AssetsRegistry: 0xf7860e52b3d3660de35c808455ec483699e9d85137db46ef4bbea33613baafd5 PhalaStakePoolv2: 
0x75e3ed3f59e45643ed1149ad80929c1b99e9d85137db46ef4bbea33613baafd5 PhalaVault: 0xa61c43efbf2367eb32ddcf956fc97dd499e9d85137db46ef4bbea33613baafd5 PhalaWrappedBalances: 0x1466de7f00add77dbab4df042b8c4a8499e9d85137db46ef4bbea33613baafd5 PhalaBasePool: 0x00f8eafbad3b4a32114491ad7e12491499e9d85137db46ef4bbea33613baafd5 Uniques: 0x5e8a19e3cd1b7c148b33880c479c028199e9d85137db46ef4bbea33613baafd5 RmrkCore: 0x5bef2c5471aa9e955551dc810f5abb3999e9d85137db46ef4bbea33613baafd5 RmrkEquip: 0x8c2ffe3a0b5892f363d8b9e374b9e9fc99e9d85137db46ef4bbea33613baafd5 RmrkMarket: 0x826a25a29a1da02112a6b8390475706699e9d85137db46ef4bbea33613baafd5 PWNftSale: 0x04d3f224a1307398074171146ffc417299e9d85137db46ef4bbea33613baafd5 PWIncubation: 0x66b8232d707a1e10cc0cc5a75b738ad299e9d85137db46ef4bbea33613baafd5 RmrkCore::Collections: 0x5bef2c5471aa9e955551dc810f5abb399200647b8c99af7b8b52752114831bdb RmrkCore::Nfts: 0x5bef2c5471aa9e955551dc810f5abb39e8d49389c2e23e152fdd6364daadd2cc RmrkCore::Priorities: 0x5bef2c5471aa9e955551dc810f5abb397f6749268d89e15586d82478e7290431 RmrkCore::Children: 0x5bef2c5471aa9e955551dc810f5abb39261f5a952a31d4199096219bbfd87740 RmrkCore::Resources: 0x5bef2c5471aa9e955551dc810f5abb392111e0df19de9563b58301e5f7e00743 RmrkCore::EquippableBases: 0x5bef2c5471aa9e955551dc810f5abb39ef660df27389f71e45f1741b554773fb RmrkCore::EquippableSlots: 0x5bef2c5471aa9e955551dc810f5abb393e26973064c5f9a17e8bfaa18aee3013 RmrkCore::Properties: 0x5bef2c5471aa9e955551dc810f5abb39a436740684271e6e2985d7bb452fdf99 RmrkCore::Lock: 0x5bef2c5471aa9e955551dc810f5abb39fb1ef94455cc6b5a3840206754686d98 RmrkCore::DummyStorage: 0x5bef2c5471aa9e955551dc810f5abb399439307cc9be85229487820a36657c35 */ } }
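// Side note (not part of the original module): the two helpers in `utils`
// mirror Substrate's standard key scheme, twox128(pallet) ++ twox128(item),
// plus twox64(SCALE(key)) ++ SCALE(key) for Twox64Concat maps. A minimal
// usage sketch: "RmrkCore"/"Lock" come from the key dump above, while the
// u32 key 42 is an arbitrary illustrative value, and the `hex` crate is
// assumed as in `show_keys`.
use utils::{storage_map_prefix_twox_64_concat, storage_prefix};

fn demo_storage_keys() {
    // Plain storage value: twox128("RmrkCore") ++ twox128("Lock")
    let value_key = storage_prefix("RmrkCore", "Lock");
    // Map entry: prefix ++ twox64(SCALE(42u32)) ++ SCALE(42u32)
    let entry_key = storage_map_prefix_twox_64_concat(b"RmrkCore", b"Lock", &42u32);
    println!("value key: 0x{}", hex::encode(&value_key));
    println!("map entry key: 0x{}", hex::encode(&entry_key));
}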
proof: StorageProof,
random_line_split
mod.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! # Bridge Module //! //! This will eventually have some useful documentation. //! For now though, enjoy this cow's wisdom. //! //!```ignore //!________________________________________ //! / You are only young once, but you can \ //! \ stay immature indefinitely. / //! ---------------------------------------- //! \ ^__^ //! \ (oo)\_______ //! (__)\ )\/\ //! ||----w | //! || || //!``` // // Ensure we're `no_std` when compiling for Wasm. // #![cfg_attr(not(feature = "std"), no_std)] mod error; mod justification; pub mod storage_proof; mod types; use im::OrdMap as BTreeMap; use std::fmt; use anyhow::Result; use error::JustificationError; use justification::GrandpaJustification; use log::{error, info}; use phala_serde_more as more; use serde::{Deserialize, Serialize}; use storage_proof::{StorageProof, StorageProofChecker}; use finality_grandpa::voter_set::VoterSet; use num::AsPrimitive; use parity_scale_codec::{Decode, Encode}; use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sp_runtime::EncodedJustification; pub use types::{AuthoritySet, AuthoritySetChange}; #[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)] pub struct BridgeInfo<T: Config> { #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] last_finalized_block_header: T::Header, #[serde(with = "more::scale_bytes")] current_set: AuthoritySet, } impl<T: Config> BridgeInfo<T> { pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self
} type BridgeId = u64; pub trait Config: frame_system::Config<Hash = H256> { type Block: BlockT<Hash = H256, Header = Self::Header>; } impl Config for chain::Runtime { type Block = chain::Block; } #[derive(Encode, Decode, Clone, Serialize, Deserialize)] pub struct LightValidation<T: Config> { num_bridges: BridgeId, #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>, } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { #[allow(clippy::new_without_default)] pub fn new() -> Self { LightValidation { num_bridges: 0, tracked_bridges: BTreeMap::new(), } } pub fn initialize_bridge( &mut self, block_header: T::Header, validator_set: AuthoritySet, proof: StorageProof, ) -> Result<BridgeId> { let state_root = block_header.state_root(); Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id) .map_err(anyhow::Error::msg)?; let bridge_info = BridgeInfo::new(block_header, validator_set); let new_bridge_id = self.num_bridges + 1; self.tracked_bridges.insert(new_bridge_id, bridge_info); self.num_bridges = new_bridge_id; Ok(new_bridge_id) } /// Submits a sequence of block headers to the light client to validate /// /// The light client accepts a sequence of block headers, optionally with an authority set change /// in the last block. Without the authority set change, it assumes the authority set and the set /// id remains the same after submitting the blocks. One submission can have at most one authortiy /// set change (change.set_id == last_set_id + 1). pub fn submit_finalized_headers( &mut self, bridge_id: BridgeId, header: T::Header, ancestry_proof: Vec<T::Header>, grandpa_proof: EncodedJustification, auhtority_set_change: Option<AuthoritySetChange>, ) -> Result<()> { let bridge = self .tracked_bridges .get(&bridge_id) .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?; // Check that the new header is a decendent of the old header let last_header = &bridge.last_finalized_block_header; verify_ancestry(ancestry_proof, last_header.hash(), &header)?; let block_hash = header.hash(); let block_num = *header.number(); // Check that the header has been finalized let voters = &bridge.current_set; let voter_set = VoterSet::new(voters.list.clone()).unwrap(); let voter_set_id = voters.id; verify_grandpa_proof::<T::Block>( grandpa_proof, block_hash, block_num, voter_set_id, &voter_set, )?; match self.tracked_bridges.get_mut(&bridge_id) { Some(bridge_info) => { bridge_info.last_finalized_block_header = header; if let Some(change) = auhtority_set_change { // Check the validator set increment if change.authority_set.id != voter_set_id + 1 { return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId)); } // Check validator set change proof let state_root = bridge_info.last_finalized_block_header.state_root(); Self::check_validator_set_proof( state_root, change.authority_proof, &change.authority_set.list, change.authority_set.id, )?; // Commit bridge_info.current_set = AuthoritySet { list: change.authority_set.list, id: change.authority_set.id, } } } _ => panic!("We succesfully got this bridge earlier, therefore it exists; qed"), }; Ok(()) } pub fn validate_storage_proof( &self, state_root: T::Hash, proof: StorageProof, items: &[(&[u8], &[u8])], // &[(key, value)] ) -> Result<()> { let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?; for (k, v) in items { let actual_value = checker .read_value(k)? 
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; if actual_value.as_slice() != *v { return Err(anyhow::Error::msg(Error::StorageValueMismatch)); } } Ok(()) } } #[derive(Debug)] pub enum Error { // InvalidStorageProof, // StorageRootMismatch, StorageValueUnavailable, // InvalidValidatorSetProof, ValidatorSetMismatch, InvalidAncestryProof, NoSuchBridgeExists, InvalidFinalityProof, // UnknownClientError, // HeaderAncestryMismatch, UnexpectedValidatorSetId, StorageValueMismatch, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { // Error::StorageRootMismatch => write!(f, "storage root mismatch"), Error::StorageValueUnavailable => write!(f, "storage value unavailable"), Error::ValidatorSetMismatch => write!(f, "validator set mismatch"), Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"), Error::NoSuchBridgeExists => write!(f, "no such bridge exists"), Error::InvalidFinalityProof => write!(f, "invalid finality proof"), // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"), Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"), Error::StorageValueMismatch => write!(f, "storage value mismatch"), } } } impl From<JustificationError> for Error { fn from(e: JustificationError) -> Self { match e { JustificationError::BadJustification(msg) => { error!("InvalidFinalityProof(BadJustification({}))", msg); Error::InvalidFinalityProof } JustificationError::JustificationDecode => { error!("InvalidFinalityProof(JustificationDecode)"); Error::InvalidFinalityProof } } } } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, validator_set: &[(AuthorityId, AuthorityWeight)], _set_id: SetId, ) -> Result<()> { let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?; // By encoding the given set we should have an easy way to compare // with the stuff we get out of storage via `read_value` let mut encoded_validator_set = validator_set.encode(); encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERISON == 1 let actual_validator_set = checker .read_value(b":grandpa_authorities")? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; // TODO: check set_id // checker.read_value(grandpa::CurrentSetId.key()) if encoded_validator_set == actual_validator_set { Ok(()) } else { Err(anyhow::Error::msg(Error::ValidatorSetMismatch)) } } } // A naive way to check whether a `child` header is a decendent // of an `ancestor` header. For this it requires a proof which // is a chain of headers between (but not including) the `child` // and `ancestor`. This could be updated to use something like // Log2 Ancestors (#2053) in the future. 
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()> where H: Header<Hash = H256>, { { info!("ancestor_hash: {}", ancestor_hash); for h in proof.iter() { info!( "block {:?} - hash: {} parent: {}", h.number(), h.hash(), h.parent_hash() ); } info!( "child block {:?} - hash: {} parent: {}", child.number(), child.hash(), child.parent_hash() ); } let mut parent_hash = child.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } // If we find that the header's parent hash matches our ancestor's hash we're done for header in proof.iter() { // Need to check that blocks are actually related if header.hash() != *parent_hash { break; } parent_hash = header.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } } Err(anyhow::Error::msg(Error::InvalidAncestryProof)) } fn verify_grandpa_proof<B>( justification: EncodedJustification, hash: B::Hash, number: NumberFor<B>, set_id: u64, voters: &VoterSet<AuthorityId>, ) -> Result<()> where B: BlockT<Hash = H256>, NumberFor<B>: finality_grandpa::BlockNumberOps, { // We don't really care about the justification, as long as it's valid let _ = GrandpaJustification::<B>::decode_and_verify_finalizes( &justification, (hash, number), set_id, voters, ) .map_err(anyhow::Error::msg)?; Ok(()) } impl<T: Config> fmt::Debug for LightValidation<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "LightValidationTest {{ num_bridges: {}, tracked_bridges: {:?} }}", self.num_bridges, self.tracked_bridges ) } } impl<T: Config> fmt::Debug for BridgeInfo<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}", self.last_finalized_block_header, self.current_set.list, self.current_set.id) } } pub mod utils { use parity_scale_codec::Encode; /// Gets the prefix of a storage item pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> { let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]); bytes } /// Calculates the Substrate storage key prefix for a StorageMap pub fn storage_map_prefix_twox_64_concat( module: &[u8], storage_item: &[u8], key: &(impl Encode + ?Sized), ) -> Vec<u8> { let mut bytes = sp_core::twox_128(module).to_vec(); bytes.extend(&sp_core::twox_128(storage_item)[..]); let encoded = key.encode(); bytes.extend(sp_core::twox_64(&encoded)); bytes.extend(&encoded); bytes } #[test] #[ignore = "for debug"] fn show_keys() { let modules = [ "System", "Timestamp", "RandomnessCollectiveFlip", "Utility", "Multisig", "Proxy", "Vesting", "Scheduler", "Preimage", "ParachainInfo", "ParachainSystem", "XcmpQueue", "CumulusXcm", "DmpQueue", "PolkadotXcm", "Balances", "TransactionPayment", "Authorship", "CollatorSelection", "Session", "Aura", "AuraExt", "Identity", "Democracy", "Council", "Treasury", "Bounties", "Lottery", "TechnicalCommittee", "TechnicalMembership", "PhragmenElection", "Tips", "ChildBounties", "ChainBridge", "XcmBridge", "XTransfer", "PhalaMq", "PhalaRegistry", "PhalaComputation", "PhalaStakePool", "Assets", "AssetsRegistry", "PhalaStakePoolv2", "PhalaVault", "PhalaWrappedBalances", "PhalaBasePool", "Uniques", "RmrkCore", "RmrkEquip", "RmrkMarket", "PWNftSale", "PWIncubation", ]; for module in modules.iter() { let key = storage_prefix(module, ""); println!("{module}: 0x{}", hex::encode(key)); } let storage_keys = [ "Collections", "Nfts", "Priorities", "Children", "Resources", "EquippableBases", 
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation: 0xb71c310d8c830d345ee1c1b84566a8d199e9d85137db46ef4bbea33613baafd5 PhalaStakePool: 0x9708ddcf89326bf4f4428dd135287d5199e9d85137db46ef4bbea33613baafd5 Assets: 0x682a59d51ab9e48a8c8cc418ff9708d299e9d85137db46ef4bbea33613baafd5 AssetsRegistry: 0xf7860e52b3d3660de35c808455ec483699e9d85137db46ef4bbea33613baafd5 PhalaStakePoolv2: 
0x75e3ed3f59e45643ed1149ad80929c1b99e9d85137db46ef4bbea33613baafd5 PhalaVault: 0xa61c43efbf2367eb32ddcf956fc97dd499e9d85137db46ef4bbea33613baafd5 PhalaWrappedBalances: 0x1466de7f00add77dbab4df042b8c4a8499e9d85137db46ef4bbea33613baafd5 PhalaBasePool: 0x00f8eafbad3b4a32114491ad7e12491499e9d85137db46ef4bbea33613baafd5 Uniques: 0x5e8a19e3cd1b7c148b33880c479c028199e9d85137db46ef4bbea33613baafd5 RmrkCore: 0x5bef2c5471aa9e955551dc810f5abb3999e9d85137db46ef4bbea33613baafd5 RmrkEquip: 0x8c2ffe3a0b5892f363d8b9e374b9e9fc99e9d85137db46ef4bbea33613baafd5 RmrkMarket: 0x826a25a29a1da02112a6b8390475706699e9d85137db46ef4bbea33613baafd5 PWNftSale: 0x04d3f224a1307398074171146ffc417299e9d85137db46ef4bbea33613baafd5 PWIncubation: 0x66b8232d707a1e10cc0cc5a75b738ad299e9d85137db46ef4bbea33613baafd5 RmrkCore::Collections: 0x5bef2c5471aa9e955551dc810f5abb399200647b8c99af7b8b52752114831bdb RmrkCore::Nfts: 0x5bef2c5471aa9e955551dc810f5abb39e8d49389c2e23e152fdd6364daadd2cc RmrkCore::Priorities: 0x5bef2c5471aa9e955551dc810f5abb397f6749268d89e15586d82478e7290431 RmrkCore::Children: 0x5bef2c5471aa9e955551dc810f5abb39261f5a952a31d4199096219bbfd87740 RmrkCore::Resources: 0x5bef2c5471aa9e955551dc810f5abb392111e0df19de9563b58301e5f7e00743 RmrkCore::EquippableBases: 0x5bef2c5471aa9e955551dc810f5abb39ef660df27389f71e45f1741b554773fb RmrkCore::EquippableSlots: 0x5bef2c5471aa9e955551dc810f5abb393e26973064c5f9a17e8bfaa18aee3013 RmrkCore::Properties: 0x5bef2c5471aa9e955551dc810f5abb39a436740684271e6e2985d7bb452fdf99 RmrkCore::Lock: 0x5bef2c5471aa9e955551dc810f5abb39fb1ef94455cc6b5a3840206754686d98 RmrkCore::DummyStorage: 0x5bef2c5471aa9e955551dc810f5abb399439307cc9be85229487820a36657c35 */ } }
{ BridgeInfo { last_finalized_block_header: block_header, current_set: validator_set, } }
identifier_body
mod.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! # Bridge Module //! //! This will eventually have some useful documentation. //! For now though, enjoy this cow's wisdom. //! //!```ignore //!________________________________________ //! / You are only young once, but you can \ //! \ stay immature indefinitely. / //! ---------------------------------------- //! \ ^__^ //! \ (oo)\_______ //! (__)\ )\/\ //! ||----w | //! || || //!``` // // Ensure we're `no_std` when compiling for Wasm. // #![cfg_attr(not(feature = "std"), no_std)] mod error; mod justification; pub mod storage_proof; mod types; use im::OrdMap as BTreeMap; use std::fmt; use anyhow::Result; use error::JustificationError; use justification::GrandpaJustification; use log::{error, info}; use phala_serde_more as more; use serde::{Deserialize, Serialize}; use storage_proof::{StorageProof, StorageProofChecker}; use finality_grandpa::voter_set::VoterSet; use num::AsPrimitive; use parity_scale_codec::{Decode, Encode}; use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sp_runtime::EncodedJustification; pub use types::{AuthoritySet, AuthoritySetChange}; #[derive(Encode, Decode, Clone, PartialEq, Serialize, Deserialize)] pub struct BridgeInfo<T: Config> { #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] last_finalized_block_header: T::Header, #[serde(with = "more::scale_bytes")] current_set: AuthoritySet, } impl<T: Config> BridgeInfo<T> { pub fn new(block_header: T::Header, validator_set: AuthoritySet) -> Self { BridgeInfo { last_finalized_block_header: block_header, current_set: validator_set, } } } type BridgeId = u64; pub trait Config: frame_system::Config<Hash = H256> { type Block: BlockT<Hash = H256, Header = Self::Header>; } impl Config for chain::Runtime { type Block = chain::Block; } #[derive(Encode, Decode, Clone, Serialize, Deserialize)] pub struct LightValidation<T: Config> { num_bridges: BridgeId, #[serde(bound( serialize = "T::Header: ::serde::Serialize", deserialize = "T::Header: ::serde::de::DeserializeOwned" ))] tracked_bridges: BTreeMap<BridgeId, BridgeInfo<T>>, } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { #[allow(clippy::new_without_default)] pub fn new() -> Self { LightValidation { num_bridges: 0, tracked_bridges: BTreeMap::new(), } } pub fn
( &mut self, block_header: T::Header, validator_set: AuthoritySet, proof: StorageProof, ) -> Result<BridgeId> { let state_root = block_header.state_root(); Self::check_validator_set_proof(state_root, proof, &validator_set.list, validator_set.id) .map_err(anyhow::Error::msg)?; let bridge_info = BridgeInfo::new(block_header, validator_set); let new_bridge_id = self.num_bridges + 1; self.tracked_bridges.insert(new_bridge_id, bridge_info); self.num_bridges = new_bridge_id; Ok(new_bridge_id) } /// Submits a sequence of block headers to the light client to validate /// /// The light client accepts a sequence of block headers, optionally with an authority set change /// in the last block. Without the authority set change, it assumes the authority set and the set /// id remains the same after submitting the blocks. One submission can have at most one authortiy /// set change (change.set_id == last_set_id + 1). pub fn submit_finalized_headers( &mut self, bridge_id: BridgeId, header: T::Header, ancestry_proof: Vec<T::Header>, grandpa_proof: EncodedJustification, auhtority_set_change: Option<AuthoritySetChange>, ) -> Result<()> { let bridge = self .tracked_bridges .get(&bridge_id) .ok_or_else(|| anyhow::Error::msg(Error::NoSuchBridgeExists))?; // Check that the new header is a decendent of the old header let last_header = &bridge.last_finalized_block_header; verify_ancestry(ancestry_proof, last_header.hash(), &header)?; let block_hash = header.hash(); let block_num = *header.number(); // Check that the header has been finalized let voters = &bridge.current_set; let voter_set = VoterSet::new(voters.list.clone()).unwrap(); let voter_set_id = voters.id; verify_grandpa_proof::<T::Block>( grandpa_proof, block_hash, block_num, voter_set_id, &voter_set, )?; match self.tracked_bridges.get_mut(&bridge_id) { Some(bridge_info) => { bridge_info.last_finalized_block_header = header; if let Some(change) = auhtority_set_change { // Check the validator set increment if change.authority_set.id != voter_set_id + 1 { return Err(anyhow::Error::msg(Error::UnexpectedValidatorSetId)); } // Check validator set change proof let state_root = bridge_info.last_finalized_block_header.state_root(); Self::check_validator_set_proof( state_root, change.authority_proof, &change.authority_set.list, change.authority_set.id, )?; // Commit bridge_info.current_set = AuthoritySet { list: change.authority_set.list, id: change.authority_set.id, } } } _ => panic!("We succesfully got this bridge earlier, therefore it exists; qed"), }; Ok(()) } pub fn validate_storage_proof( &self, state_root: T::Hash, proof: StorageProof, items: &[(&[u8], &[u8])], // &[(key, value)] ) -> Result<()> { let checker = StorageProofChecker::<T::Hashing>::new(state_root, proof)?; for (k, v) in items { let actual_value = checker .read_value(k)? 
.ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; if actual_value.as_slice() != *v { return Err(anyhow::Error::msg(Error::StorageValueMismatch)); } } Ok(()) } } #[derive(Debug)] pub enum Error { // InvalidStorageProof, // StorageRootMismatch, StorageValueUnavailable, // InvalidValidatorSetProof, ValidatorSetMismatch, InvalidAncestryProof, NoSuchBridgeExists, InvalidFinalityProof, // UnknownClientError, // HeaderAncestryMismatch, UnexpectedValidatorSetId, StorageValueMismatch, } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { // Error::StorageRootMismatch => write!(f, "storage root mismatch"), Error::StorageValueUnavailable => write!(f, "storage value unavailable"), Error::ValidatorSetMismatch => write!(f, "validator set mismatch"), Error::InvalidAncestryProof => write!(f, "invalid ancestry proof"), Error::NoSuchBridgeExists => write!(f, "no such bridge exists"), Error::InvalidFinalityProof => write!(f, "invalid finality proof"), // Error::HeaderAncestryMismatch => write!(f, "header ancestry mismatch"), Error::UnexpectedValidatorSetId => write!(f, "unexpected validator set id"), Error::StorageValueMismatch => write!(f, "storage value mismatch"), } } } impl From<JustificationError> for Error { fn from(e: JustificationError) -> Self { match e { JustificationError::BadJustification(msg) => { error!("InvalidFinalityProof(BadJustification({}))", msg); Error::InvalidFinalityProof } JustificationError::JustificationDecode => { error!("InvalidFinalityProof(JustificationDecode)"); Error::InvalidFinalityProof } } } } impl<T: Config> LightValidation<T> where NumberFor<T::Block>: AsPrimitive<usize>, { fn check_validator_set_proof( state_root: &T::Hash, proof: StorageProof, validator_set: &[(AuthorityId, AuthorityWeight)], _set_id: SetId, ) -> Result<()> { let checker = <StorageProofChecker<T::Hashing>>::new(*state_root, proof)?; // By encoding the given set we should have an easy way to compare // with the stuff we get out of storage via `read_value` let mut encoded_validator_set = validator_set.encode(); encoded_validator_set.insert(0, 1); // Add AUTHORITIES_VERISON == 1 let actual_validator_set = checker .read_value(b":grandpa_authorities")? .ok_or_else(|| anyhow::Error::msg(Error::StorageValueUnavailable))?; // TODO: check set_id // checker.read_value(grandpa::CurrentSetId.key()) if encoded_validator_set == actual_validator_set { Ok(()) } else { Err(anyhow::Error::msg(Error::ValidatorSetMismatch)) } } } // A naive way to check whether a `child` header is a decendent // of an `ancestor` header. For this it requires a proof which // is a chain of headers between (but not including) the `child` // and `ancestor`. This could be updated to use something like // Log2 Ancestors (#2053) in the future. 
fn verify_ancestry<H>(proof: Vec<H>, ancestor_hash: H::Hash, child: &H) -> Result<()> where H: Header<Hash = H256>, { { info!("ancestor_hash: {}", ancestor_hash); for h in proof.iter() { info!( "block {:?} - hash: {} parent: {}", h.number(), h.hash(), h.parent_hash() ); } info!( "child block {:?} - hash: {} parent: {}", child.number(), child.hash(), child.parent_hash() ); } let mut parent_hash = child.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } // If we find that the header's parent hash matches our ancestor's hash we're done for header in proof.iter() { // Need to check that blocks are actually related if header.hash() != *parent_hash { break; } parent_hash = header.parent_hash(); if *parent_hash == ancestor_hash { return Ok(()); } } Err(anyhow::Error::msg(Error::InvalidAncestryProof)) } fn verify_grandpa_proof<B>( justification: EncodedJustification, hash: B::Hash, number: NumberFor<B>, set_id: u64, voters: &VoterSet<AuthorityId>, ) -> Result<()> where B: BlockT<Hash = H256>, NumberFor<B>: finality_grandpa::BlockNumberOps, { // We don't really care about the justification, as long as it's valid let _ = GrandpaJustification::<B>::decode_and_verify_finalizes( &justification, (hash, number), set_id, voters, ) .map_err(anyhow::Error::msg)?; Ok(()) } impl<T: Config> fmt::Debug for LightValidation<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "LightValidationTest {{ num_bridges: {}, tracked_bridges: {:?} }}", self.num_bridges, self.tracked_bridges ) } } impl<T: Config> fmt::Debug for BridgeInfo<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "BridgeInfo {{ last_finalized_block_header: {:?}, current_validator_set: {:?}, current_validator_set_id: {} }}", self.last_finalized_block_header, self.current_set.list, self.current_set.id) } } pub mod utils { use parity_scale_codec::Encode; /// Gets the prefix of a storage item pub fn storage_prefix(module: &str, storage: &str) -> Vec<u8> { let mut bytes = sp_core::twox_128(module.as_bytes()).to_vec(); bytes.extend(&sp_core::twox_128(storage.as_bytes())[..]); bytes } /// Calculates the Substrate storage key prefix for a StorageMap pub fn storage_map_prefix_twox_64_concat( module: &[u8], storage_item: &[u8], key: &(impl Encode + ?Sized), ) -> Vec<u8> { let mut bytes = sp_core::twox_128(module).to_vec(); bytes.extend(&sp_core::twox_128(storage_item)[..]); let encoded = key.encode(); bytes.extend(sp_core::twox_64(&encoded)); bytes.extend(&encoded); bytes } #[test] #[ignore = "for debug"] fn show_keys() { let modules = [ "System", "Timestamp", "RandomnessCollectiveFlip", "Utility", "Multisig", "Proxy", "Vesting", "Scheduler", "Preimage", "ParachainInfo", "ParachainSystem", "XcmpQueue", "CumulusXcm", "DmpQueue", "PolkadotXcm", "Balances", "TransactionPayment", "Authorship", "CollatorSelection", "Session", "Aura", "AuraExt", "Identity", "Democracy", "Council", "Treasury", "Bounties", "Lottery", "TechnicalCommittee", "TechnicalMembership", "PhragmenElection", "Tips", "ChildBounties", "ChainBridge", "XcmBridge", "XTransfer", "PhalaMq", "PhalaRegistry", "PhalaComputation", "PhalaStakePool", "Assets", "AssetsRegistry", "PhalaStakePoolv2", "PhalaVault", "PhalaWrappedBalances", "PhalaBasePool", "Uniques", "RmrkCore", "RmrkEquip", "RmrkMarket", "PWNftSale", "PWIncubation", ]; for module in modules.iter() { let key = storage_prefix(module, ""); println!("{module}: 0x{}", hex::encode(key)); } let storage_keys = [ "Collections", "Nfts", "Priorities", "Children", "Resources", "EquippableBases", 
"EquippableSlots", "Properties", "Lock", "DummyStorage", ]; for key in storage_keys.iter() { let prefix = storage_prefix("RmrkCore", key); println!("RmrkCore::{key}: 0x{}", hex::encode(prefix)); } /* System: 0x26aa394eea5630e07c48ae0c9558cef799e9d85137db46ef4bbea33613baafd5 Timestamp: 0xf0c365c3cf59d671eb72da0e7a4113c499e9d85137db46ef4bbea33613baafd5 RandomnessCollectiveFlip: 0xbd2a529379475088d3e29a918cd4787299e9d85137db46ef4bbea33613baafd5 Utility: 0xd5e1a2fa16732ce6906189438c0a82c699e9d85137db46ef4bbea33613baafd5 Multisig: 0x7474449cca95dc5d0c00e71735a6d17d99e9d85137db46ef4bbea33613baafd5 Proxy: 0x1809d78346727a0ef58c0fa03bafa32399e9d85137db46ef4bbea33613baafd5 Vesting: 0x5f27b51b5ec208ee9cb25b55d872824399e9d85137db46ef4bbea33613baafd5 Scheduler: 0x3db7a24cfdc9de785974746c14a99df999e9d85137db46ef4bbea33613baafd5 Preimage: 0xd8f314b7f4e6b095f0f8ee4656a4482599e9d85137db46ef4bbea33613baafd5 ParachainInfo: 0x0d715f2646c8f85767b5d2764bb2782699e9d85137db46ef4bbea33613baafd5 ParachainSystem: 0x45323df7cc47150b3930e2666b0aa31399e9d85137db46ef4bbea33613baafd5 XcmpQueue: 0x7b3237373ffdfeb1cab4222e3b520d6b99e9d85137db46ef4bbea33613baafd5 CumulusXcm: 0x79e2fe5d327165001f8232643023ed8b99e9d85137db46ef4bbea33613baafd5 DmpQueue: 0xcd5c1f6df63bc97f4a8ce37f14a50ca799e9d85137db46ef4bbea33613baafd5 PolkadotXcm: 0xe38f185207498abb5c213d0fb059b3d899e9d85137db46ef4bbea33613baafd5 Balances: 0xc2261276cc9d1f8598ea4b6a74b15c2f99e9d85137db46ef4bbea33613baafd5 TransactionPayment: 0x3f1467a096bcd71a5b6a0c8155e2081099e9d85137db46ef4bbea33613baafd5 Authorship: 0xd57bce545fb382c34570e5dfbf338f5e99e9d85137db46ef4bbea33613baafd5 CollatorSelection: 0x15464cac3378d46f113cd5b7a4d71c8499e9d85137db46ef4bbea33613baafd5 Session: 0xcec5070d609dd3497f72bde07fc96ba099e9d85137db46ef4bbea33613baafd5 Aura: 0x57f8dc2f5ab09467896f47300f04243899e9d85137db46ef4bbea33613baafd5 AuraExt: 0x3c311d57d4daf52904616cf69648081e99e9d85137db46ef4bbea33613baafd5 Identity: 0x2aeddc77fe58c98d50bd37f1b90840f999e9d85137db46ef4bbea33613baafd5 Democracy: 0xf2794c22e353e9a839f12faab03a911b99e9d85137db46ef4bbea33613baafd5 Council: 0xaebd463ed9925c488c112434d61debc099e9d85137db46ef4bbea33613baafd5 Treasury: 0x89d139e01a5eb2256f222e5fc5dbe6b399e9d85137db46ef4bbea33613baafd5 Bounties: 0xa37f719efab16103103a0c8c2c784ce199e9d85137db46ef4bbea33613baafd5 Lottery: 0xfbc9f53700f75f681f234e70fb7241eb99e9d85137db46ef4bbea33613baafd5 TechnicalCommittee: 0xed25f63942de25ac5253ba64b5eb64d199e9d85137db46ef4bbea33613baafd5 TechnicalMembership: 0x3a2d6c9353500637d8f8e3e0fa0bb1c599e9d85137db46ef4bbea33613baafd5 PhragmenElection: 0xe2e62dd81c48a88f73b6f6463555fd8e99e9d85137db46ef4bbea33613baafd5 Tips: 0x2c5de123c468aef7f3ac2ab3a76f87ce99e9d85137db46ef4bbea33613baafd5 ChildBounties: 0xedfb05b766f199ce00df85317e33050e99e9d85137db46ef4bbea33613baafd5 ChainBridge: 0x43cdcd39d5edb1d16e24fa028edde0de99e9d85137db46ef4bbea33613baafd5 XcmBridge: 0x9d0cdc3697970df81fa5fabe88fa03ea99e9d85137db46ef4bbea33613baafd5 XTransfer: 0xc0cf946351a2b7b37cc8f3086b3674a199e9d85137db46ef4bbea33613baafd5 PhalaMq: 0x2f039a6a7f13e94b9545257e54062a0499e9d85137db46ef4bbea33613baafd5 PhalaRegistry: 0x0d746931e7a6bfd47fbcccfd71984aef99e9d85137db46ef4bbea33613baafd5 PhalaComputation: 0xb71c310d8c830d345ee1c1b84566a8d199e9d85137db46ef4bbea33613baafd5 PhalaStakePool: 0x9708ddcf89326bf4f4428dd135287d5199e9d85137db46ef4bbea33613baafd5 Assets: 0x682a59d51ab9e48a8c8cc418ff9708d299e9d85137db46ef4bbea33613baafd5 AssetsRegistry: 0xf7860e52b3d3660de35c808455ec483699e9d85137db46ef4bbea33613baafd5 PhalaStakePoolv2: 
0x75e3ed3f59e45643ed1149ad80929c1b99e9d85137db46ef4bbea33613baafd5 PhalaVault: 0xa61c43efbf2367eb32ddcf956fc97dd499e9d85137db46ef4bbea33613baafd5 PhalaWrappedBalances: 0x1466de7f00add77dbab4df042b8c4a8499e9d85137db46ef4bbea33613baafd5 PhalaBasePool: 0x00f8eafbad3b4a32114491ad7e12491499e9d85137db46ef4bbea33613baafd5 Uniques: 0x5e8a19e3cd1b7c148b33880c479c028199e9d85137db46ef4bbea33613baafd5 RmrkCore: 0x5bef2c5471aa9e955551dc810f5abb3999e9d85137db46ef4bbea33613baafd5 RmrkEquip: 0x8c2ffe3a0b5892f363d8b9e374b9e9fc99e9d85137db46ef4bbea33613baafd5 RmrkMarket: 0x826a25a29a1da02112a6b8390475706699e9d85137db46ef4bbea33613baafd5 PWNftSale: 0x04d3f224a1307398074171146ffc417299e9d85137db46ef4bbea33613baafd5 PWIncubation: 0x66b8232d707a1e10cc0cc5a75b738ad299e9d85137db46ef4bbea33613baafd5 RmrkCore::Collections: 0x5bef2c5471aa9e955551dc810f5abb399200647b8c99af7b8b52752114831bdb RmrkCore::Nfts: 0x5bef2c5471aa9e955551dc810f5abb39e8d49389c2e23e152fdd6364daadd2cc RmrkCore::Priorities: 0x5bef2c5471aa9e955551dc810f5abb397f6749268d89e15586d82478e7290431 RmrkCore::Children: 0x5bef2c5471aa9e955551dc810f5abb39261f5a952a31d4199096219bbfd87740 RmrkCore::Resources: 0x5bef2c5471aa9e955551dc810f5abb392111e0df19de9563b58301e5f7e00743 RmrkCore::EquippableBases: 0x5bef2c5471aa9e955551dc810f5abb39ef660df27389f71e45f1741b554773fb RmrkCore::EquippableSlots: 0x5bef2c5471aa9e955551dc810f5abb393e26973064c5f9a17e8bfaa18aee3013 RmrkCore::Properties: 0x5bef2c5471aa9e955551dc810f5abb39a436740684271e6e2985d7bb452fdf99 RmrkCore::Lock: 0x5bef2c5471aa9e955551dc810f5abb39fb1ef94455cc6b5a3840206754686d98 RmrkCore::DummyStorage: 0x5bef2c5471aa9e955551dc810f5abb399439307cc9be85229487820a36657c35 */ } }
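A minimal usage sketch of the two storage-key helpers above, assuming the `sp-core`, `parity-scale-codec`, and `hex` crates are available; the module and item names come from the test output above, while the `u32` map key is a made-up example:

```rust
use parity_scale_codec::Encode;

fn main() {
    // Plain storage item: twox128(module) ++ twox128(item),
    // mirroring storage_prefix() above.
    let mut prefix = sp_core::twox_128(b"RmrkCore").to_vec();
    prefix.extend(&sp_core::twox_128(b"Collections")[..]);
    // Should print the RmrkCore::Collections prefix listed in the test output.
    println!("RmrkCore::Collections: 0x{}", hex::encode(&prefix));

    // Twox64Concat map entry: prefix ++ twox64(scale(key)) ++ scale(key),
    // mirroring storage_map_prefix_twox_64_concat(). The key is hypothetical.
    let key: u32 = 1;
    let encoded = key.encode();
    let mut full = prefix.clone();
    full.extend(sp_core::twox_64(&encoded));
    full.extend(&encoded);
    println!("entry for collection 1: 0x{}", hex::encode(&full));
}
```

The `Twox64Concat` layout appends the SCALE-encoded key after its hash, which is what lets storage iterators recover the original key from a raw storage entry.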
initialize_bridge
identifier_name
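The ancestry check in `verify_ancestry` above only needs hashes and parent hashes; a self-contained toy version (a hypothetical `MiniHeader` with `u64` hashes standing in for the real `Header` trait and `H256`) shows the walk:

```rust
// Toy model of the ancestry walk: start from the child's parent hash and
// follow the proof headers (child-to-ancestor order) until the ancestor hash
// is reached or the chain of hashes breaks.
#[derive(Clone)]
struct MiniHeader {
    hash: u64,        // stand-in for H256
    parent_hash: u64, // stand-in for H256
}

fn verify_ancestry_toy(proof: &[MiniHeader], ancestor_hash: u64, child: &MiniHeader) -> bool {
    let mut parent_hash = child.parent_hash;
    if parent_hash == ancestor_hash {
        return true;
    }
    for header in proof {
        // The proof must be contiguous: each header must be the block we expect next.
        if header.hash != parent_hash {
            return false;
        }
        parent_hash = header.parent_hash;
        if parent_hash == ancestor_hash {
            return true;
        }
    }
    false
}

fn main() {
    // ancestor (hash 1) <- mid (hash 2) <- child (hash 3)
    let mid = MiniHeader { hash: 2, parent_hash: 1 };
    let child = MiniHeader { hash: 3, parent_hash: 2 };
    assert!(verify_ancestry_toy(&[mid.clone()], 1, &child));
    assert!(!verify_ancestry_toy(&[mid], 9, &child)); // unrelated ancestor fails
    println!("ancestry checks passed");
}
```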
autodetect.js
// autodetect runs the autodetection scheme for the configuration,
// and uses a statechart to do it.
//
// autoDetect should scan the current project folder for the following:
// - a file named sc_config.json
// - a folder called apps.
//    - If it exists,
//      - scan the subfolders,
//      - look for sc_config.json in the root of every subfolder
//        - if it exists, load it
//        - if it doesn't exist, take the default config (which is essentially a "normal" app object)
// - a folder called modules
//    - If it exists,
//      - scan the subfolders,
//      - look for sc_config.json in the root of every subfolder
//        - if it exists, load it
//        - if it doesn't exist, take the default config (which is essentially a "normal" framework object)
//
// Not sure this should be loaded automatically. The idea is that app detection should be greedy, but framework detection not:
// we should be able to retrieve a framework based on the reference, ie: we create the framework and from the reference
// (name) the framework should be able to either load itself, or add dependencies
// - a folder called frameworks.
//    - if it exists:
//      - scan the subfolders,
//      - look for sc_config in the root of every subfolder
//        - if it exists, load it
//        - if it doesn't exist, take the default config (which is essentially a "normal" framework object)
//
// If no sc_config.json can be found, and no apps folder can be found, and no frameworks folder can be found,
// throw an error, as we are not in a SC project root
//

var SC = require('sc-runtime');
var async = require('./async');
var fs = require('fs');
var path = require('path');
var jsonValidate = require('jsonschema').validate;
var SCHEMAS = require('./schemas');
var util = require('util');
var vm = require('vm');

var parseSCConfig = function(scconfig,filepath){
  // scconfig contains the result of fs.readFile
  var readConfig = {};
  var isAsync = false;
  var ctx = vm.createContext({
    config: function(opts){
      if(!opts.target) throw new Error("Missing target in SCConfig: " + filepath);
      if(!opts.type) throw new Error("Missing target type in SCConfig: " + filepath);
      readConfig[opts.target] = opts;
    },
  });
  vm.runInContext(scconfig,ctx,filepath);
  return readConfig;
};

var testJSON = function(text){
  var ret;
  try {
    ret = JSON.parse(text);
    return ret;
  }
  catch(e){
    return undefined;
  }
};

var namedHashToArray = function(hash){
  if(SC.typeOf(hash) === SC.T_ARRAY) return hash;
  else {
    // give every value its key as a name, and return the values as an array
    return Object.keys(hash).map(function(k){
      if(!hash[k].name) hash[k].name = k;
      return hash[k];
    });
  }
};

var resolveReference = function(ref,context){
  // 1. "sproutcore": depending on the context this is either an app, a framework or a module in the root of the project
  // 2. "sproutcore:desktop": this is the subframework desktop inside the sproutcore framework
  // 3. "sproutcore/lib/index.html": this is a reference to the file lib/index.html inside the sproutcore framework
  // 4. "http://my.host.ext": a url, is taken literally
  // context is one of "app", "framework", "module"
  var prefix, p;
  if(context === "app"){ prefix = "apps"; }
  else if(context === "framework"){ prefix = "frameworks"; }
  else if(context === "module"){ prefix = "modules"; }
  if(ref.indexOf("http") > -1){
    return ref; // don't do anything
  }
  if(ref.indexOf(":") > -1){
    p = ref.replace(/\:/g,"/frameworks/");
    return path.join(prefix,p);
  }
  return path.join(prefix,ref);
};

var projectPath;

// we save all config files, and generate a complex of hashes for every object to create
// in order for the actual creation process of all the levels to be easy and straightforward
// meaning that the apps will be a hash where the name of the app is a key,
// and the value an array of configurations. The same goes for the frameworks inside the apps,
// except that the frameworks will be an array of arrays, where each array will contain the different
// configuration for each level.
var allConfigs = { project: null, apps: [] };
var resultConfig;
var cb;

var AppConfigParser = SC.Object.extend({
  content: null,
  done: false, // observable to indicate ready with parsing

  init: function(){
    // take the content, and start working
    if(!this.content) throw new Error("AppConfigParser: no content!");
    this._fwconfigs = [];
  },

  start: function(){
    // we create a separate function in order to allow attaching observers before starting the procedure
    // content should be a hash or array
    if(!this.content.frameworks){
      this.set('done',true); // nothing to do
    }
    else {
      this.content.frameworks = namedHashToArray(this.content.frameworks); // will convert if necessary
      if(this.content.frameworks.length === 0){
        this.set('done',true); // nothing to do
      }
      else {
        this._currFWIndex = 0;
        this.takeNext();
      }
    }
  },

  _currFWIndex: null,

  takeNext: function(){
    if(this._currFWIndex >= this.content.frameworks.length){
      this.finish();
      return; // done with parsing
    }
    // if not done with parsing
    var curFW = this.content.frameworks[this._currFWIndex];
    var p, pOne, pTwo; // p was previously used undeclared
    if(SC.typeOf(curFW) === SC.T_STRING){
      p = resolveReference(curFW,"framework");
    }
    else { // hash: curFW was previously referenced as the undefined fw
      if(!curFW.path){
        p = resolveReference(curFW.name,"framework");
      }
      else p = curFW.path;
    }
    pOne = path.join(p,'sc_config.json');
    pTwo = path.join(p,'sc_config');
    async.exec(fs.readFile,pOne, { encoding: "utf8"}).notify(this,'nextJSONDidRead', pOne, curFW);
    async.exec(fs.readFile,pTwo, { encoding: "utf8"}).notify(this,'nextJSDidRead', pTwo, curFW);
  },

  nextJSONDidRead: function(result, args){ //args[0] == path, args[1] == fw
    var data, ret;
    if(SC.ok(result)){ // there is a config file, check dependencies
      data = testJSON(result.get('result'));
      ret = jsonValidate(data, SCHEMAS.FRAMEWORK);
      if(ret && !ret.valid){ // jsonschema reports validity on ret.valid
        throw new Error("Found syntax error in " + args[0]);
      }
      // valid json, now check deps
      if(data.dependencies && data.dependencies.length > 0){
        data.dependencies.forEach(function(dep){
          if(this.content.frameworks.indexOf(dep) === -1){
            this.content.frameworks.push(dep);
          }
        },this); // bind this, otherwise this.content is undefined inside the callback
      }
      this._jsonReturn = data;
    }
    this._jsonDidReturn = true;
    this.proceedToNext(args[0],args[1]);
  },

  nextJSDidRead: function(result, args){
    if(SC.ok(result)){
      // parse the config in result...
      // problem is that this might need another async call to figure out whether
      // target is an app or a framework...
    }
    this._jsDidReturn = true;
    this.proceedToNext(args[0],args[1]);
  },

  // slots to store any return values on. If there is nothing to read, the value is true
  _jsonDidReturn: false,
  _jsDidReturn: false,
  _jsonReturn: false,
  _jsReturn: false,

  proceedToNext: function(fwpath, fw){
    if(this._jsDidReturn && this._jsonDidReturn){
      this._jsDidReturn = false;
      this._jsonDidReturn = false;
      // next take the data
      var data = this._jsReturn? this._jsReturn : this._jsonReturn? this._jsonReturn: { path: fw };
      if(SC.typeOf(fw) === SC.T_HASH){
        this._fwconfigs.push([fw, data]); // fw, not args[1]: there is no args in this scope
      }
      else {
        this._fwconfigs.push(data);
      }
      this._jsReturn = this._jsonReturn = null; // reset return values
      this._currFWIndex += 1;
      this.takeNext();
    }
  },

  finish: function(){
    // nextDidRead stores the temporary fw configs in this._fwconfigs, we need to replace the
    // original frameworks with a reversed _fwconfigs, and set done
    this.content.frameworks = this._fwconfigs.reverse();
    this.set('done',true);
  }
});

var AutoDetection = SC.Statechart.create({
  rootState: SC.State.design({
    initialSubstate: 'PROJECTCONFIG',

    PROJECTCONFIG: SC.State.design({
      // detect project config file and load if necessary (the process can be a reload)
      enterState: function(){
        async.exec(fs.readFile,path.join(projectPath,'sc_config.json'), {encoding: 'utf8'}).notify(AutoDetection,'readFileDidRespond');
      },
      readFileDidRespond: function(result){
        if(SC.ok(result)){ //file exists
          var data = testJSON(result.get('result'));
          var ret = jsonValidate(data, SCHEMAS.PROJECT);
          if(ret && !ret.valid){ // error
            util.log('invalid project config: ' + util.inspect(ret.errors));
          }
          else {
            util.log('valid project config, continuing...');
            allConfigs.project = data; // allConfigs is a plain object, so no .set()
          }
        }
        else { // file doesn't exist
          util.log('no project config file found');
        }
        this.gotoState('APPS');
      }
    }),

    APPS: SC.State.design({
      // detect apps folder, if exists
      enterState: function(){
        async.exec(fs.readdir, path.join(projectPath,'apps')).notify(AutoDetection, 'detectAppsDidRespond');
      },
      appnames: null,
      detectAppsDidRespond: function(result){
        if(SC.ok(result)){
          // apps folder does exist, now check the contents
          var list = result.get('result');
          if(list.length > 0){
            this.appnames = list;
            list.forEach(function(app){
              async.exec(fs.readFile,path.join(projectPath,'apps',app,'sc_config.json'))
                .notify(AutoDetection, 'readAppConfigDidRespond',app);
            });
          }
          else { // apps folder does exist, but is empty
            throw new Error("Did you create any application yet?");
          }
        }
        else { // apps folder doesn't exist... not good
          throw new Error("We seem not to be in a project folder...");
        }
      },
      readAppConfigDidRespond: function(result,appname){
        var app = appname[0];
        if(SC.ok(result)){ // app config exists
          var data = testJSON(result.get('result'));
          var ret = jsonValidate(data, SCHEMAS.APP);
          if(ret && !ret.valid){ // error
            util.log('invalid app config: ' + util.inspect(ret.errors));
          }
          else {
            util.log('valid app config, continuing...');
allConfigs.apps.push(data);
          }
        }
        else {
          util.log('app found with name ' + app + " but no config file detected.");
          allConfigs.apps.push(app);
        }
        this.appnames = this.appnames.without(app);
        if(this.appnames.length === 0){
          this.gotoState('PARSEAPPS');
        }
      }
    }),

    PARSEAPPS: SC.State.design({
      // state to parse the detected apps
      // this means:
      // if the apps entry in the project config file is a name-based hash, make it into an array
      // which either is an empty object, or contains the configuration of the app.
      // if the frameworks array in the apps configuration is a name hash, expand it
      // The main reason for parsing the configuration here is that any dependencies of frameworks
      // are worked out before doing framework.create(), as a framework itself cannot / should not
      // influence the app configuration, and the app should not have to figure out any fw configuration
      // we generate a configuration where each app is a name hash
      appConfig: null,
      enterState: function(){
        this.appConfig = {};
        var projApps = allConfigs.project? allConfigs.project.apps: null;
        // go through detected apps, and match with project apps
        allConfigs.apps.forEach(function(app){
          // app in config can either be an app config that we read, or a string if it didn't exist
          var appName;
          if(SC.typeOf(app) === SC.T_STRING){
            // if it is a string, there is no config in the app dir
            appName = app;
            this.appConfig[appName] = [{ name: appName }];
            // so check the project config; we don't have to check the type, because that is the task of jsonschema
          }
          else {
            appName = app.name;
            if(!appName) throw new Error("If you include a config file in an app, you need to specify the name!");
            else {
              this.appConfig[appName] = [app];
            }
          }
          if(allConfigs.project && allConfigs.project.apps && allConfigs.project.apps[appName]){
            this.appConfig[appName].push(allConfigs.project.apps[appName]);
          }
        },this);
        // now we have the basic configuration per application, we need to complete any framework dependencies
        // as in that we need to see whether the mentioned frameworks have dependencies, and include them
        // the app will do the sorting, and weed out any duplicates
        this._appsData = { };
        util.log('appConfig: ' + util.inspect(this.appConfig));
        Object.keys(this.appConfig).forEach(this.parseApp,this);
      },
      _appsData: null,
      parseApp: function(appName){
        util.log('parsing appName: ' + appName);
        var curApp = this.appConfig[appName]; // curApp is an array with configurations
        // for every item in the curApp array we create an instance of
        // AppConfigParser, and attach an observer to the done property
        curApp.forEach(function(conf, confIndex){
          var t = AppConfigParser.create({ content: conf, appName: appName, index: confIndex });
          t.addObserver('done',this,'ready');
          if(!this._appsData[appName]) this._appsData[appName] = { configs: [] };
          this._appsData[appName].configs[confIndex] = t;
          t.start();
        },this);
      },
      // in this setup, how big is the chance of a race condition?
      ready: function(target,key,value,rev){
        // we get called for every app config; the target object carries appName and index
        var appname = target.get('appName');
        var index = target.get('index');
        if(target.get(key)){
          allConfigs.apps[appname] = target.content; // replace read config with detected config...
          target.removeObserver('done', this,'ready'); // remove the observer
        }
        // we check the following for every app
        // length of this.appConfig[appName] needs to be equal to this._appsData[appName].configs
        // all this._appsData[appName].configs elements need to be done
        var ready = true;
        Object.keys(this.appConfig).forEach(function(name){
          if(!this._appsData[name]){ ready = false; }
          else if(this.appConfig[name].length !== this._appsData[name].configs.length){ ready = false; }
          else if(!this._appsData[name].configs.everyProperty('done')){ ready = false; }
        },this);
        if(ready){
          // everything is done, continue to the next stage
          util.log('all apps parsed...');
          this.gotoState('CHECKSPROUTCORE');
        }
      }
    }),

    CHECKSPROUTCORE: SC.State.design({
      // this is to check whether sproutcore is in the frameworks directory. If it is there,
      // we will need to use that version instead of the built-in one.
      enterState: function(){
        // readFile, not readdir: we are loading the config file itself
        async.exec(fs.readFile, path.join(projectPath,'frameworks','sproutcore','sc_config.json'), {encoding: 'utf8'})
          .notify(AutoDetection, 'detectSproutcoreDidRespond');
      },
      detectSproutcoreDidRespond: function(result){
        if(SC.ok(result)){
          var data = testJSON(result.get('result'));
          var ret = jsonValidate(data, SCHEMAS.FRAMEWORK);
          if(ret && !ret.valid){ // error
            util.log('invalid sproutcore config: ' + util.inspect(ret.errors));
          }
          else {
            util.log('valid sproutcore config, continuing...');
            allConfigs.sproutcore = data;
          }
        }
        else {
          util.log('no sproutcore detected in frameworks folder');
        }
        this.gotoState('FINISHED');
        //cb(null,allConfigs);
      }
    }),

    FINISHED: SC.State.design({
      enterState: function(){
        cb(null,allConfigs);
      }
    })
  })
});

module.exports = function(ppath, callback){
  projectPath = ppath;
  AutoDetection.initStatechart();
  cb = callback;
};
if(!data.name) data.name = app;
random_line_split
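The four reference formats that the `resolveReference` comment enumerates boil down to a small mapping. A hypothetical restatement in Rust (kept in Rust for consistency with the earlier sketches; the real implementation is the JS `resolveReference` above, and the default-prefix fallback here is an assumption, not in the original):

```rust
// Resolution rules for sc_config references:
// 1. "sproutcore"            -> <prefix>/sproutcore
// 2. "sproutcore:desktop"    -> <prefix>/sproutcore/frameworks/desktop
// 3. "sproutcore/lib/x.html" -> <prefix>/sproutcore/lib/x.html
// 4. "http://my.host.ext"    -> taken literally
fn resolve_reference(reference: &str, context: &str) -> String {
    let prefix = match context {
        "app" => "apps",
        "module" => "modules",
        _ => "frameworks", // assumption: default to frameworks
    };
    if reference.contains("http") {
        return reference.to_string(); // URLs are taken literally
    }
    if reference.contains(':') {
        // ':' marks a nested framework inside another framework
        return format!("{}/{}", prefix, reference.replace(':', "/frameworks/"));
    }
    format!("{}/{}", prefix, reference)
}

fn main() {
    assert_eq!(resolve_reference("sproutcore", "framework"), "frameworks/sproutcore");
    assert_eq!(
        resolve_reference("sproutcore:desktop", "framework"),
        "frameworks/sproutcore/frameworks/desktop"
    );
    assert_eq!(resolve_reference("http://my.host.ext", "framework"), "http://my.host.ext");
    println!("all reference forms resolved");
}
```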
product.ts
import { Component, ViewChild } from '@angular/core' import { NavController, NavParams, Content, AlertController, Platform } from 'ionic-angular' import { ProductService } from '../../providers/service/product-service' import { Values } from '../../providers/service/values' import { Functions } from '../../providers/service/functions' import { md5 } from './md5' import { CartPage } from '../cart/cart' import { AccountLogin } from '../account/login/login' import { CalendarComponentOptions, DayConfig } from 'ion2-calendar' import moment from 'moment' import { TranslateService } from '@ngx-translate/core' import { ProductsListPage } from '../products-list/products-list' import { OneSignal } from '@ionic-native/onesignal'; import { Geolocation } from '@ionic-native/geolocation/ngx'; import { NativeGeocoder, NativeGeocoderReverseResult, NativeGeocoderForwardResult, NativeGeocoderOptions } from '@ionic-native/native-geocoder'; import { Service } from '../../providers/service/service'; @Component({ templateUrl: 'product.html', }) export class ProductPage { @ViewChild(Content) content: Content providerOneSignal:any product: any = {} id: any type: any status: any options: any optionss: any opt: any message: any wishlist: any quantity: any reviews: any reviewForm: any nickname: any details: any BookNow: any disableSubmit: boolean = true wishlistIcon: boolean = false usedVariationAttributes: any = [] selectedService: any selectedTime: any mon: any = [] day: any month: any = 1 year: any disableWeekDays = [] daysConfig: DayConfig[] = [] optionsMulti: CalendarComponentOptions = { pickMode: 'single', daysConfig: this.daysConfig, disableWeeks: this.disableWeekDays, } schedule: any NoBlockAvailable = 'NoBlockAvailable' WhatTime = 'WhatTime' lan: any = {}; miLatitude = 0; miLongitude = 0; lat: string; long:string; autocomplete: { input: string; }; address:string; date:any = '2021-03-03'; hourInit:any = '08:00'; hourEnd:any = '22:00'; processDate:any; processHour:any; product_slot:any = [] customers: any; addresses: any; addressesCustomer: any; constructor( public alert:AlertController, public translate: TranslateService, public nav: NavController, public service: ProductService, public servi:Service, public otherservice: Service, params: NavParams, public functions: Functions, public values: Values, private platform: Platform, private geolocation: Geolocation, private nativeGeocoder: NativeGeocoder, ) { console.log("prueba id onesignal", this.values.userId); this.lat = ''; this.long = ''; this.options = [] this.optionss = [] this.quantity = '1' this.BookNow = 'BookNow' this.otherservice.getCustomer() .then((results) => this.handleCustomer(results)); this.otherservice.getAddress() .then((resultsAddresses) => this.handleAddress(resultsAddresses)); if (params.data.id) { this.selectedService = null this.product.product = params.data.id this.id = params.data.id this.product_slot = params.data.product_sl; this.date = params.data.date; this.hourInit = params.data.hourInit; this.hourEnd = params.data.hourEnd; // this.selectedTime = this.date+'T'+this.hourInit this.options.product_id = this.id this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); } else { // this.options.product_id = this.id this.service .getProduct(params.data.id) .then(results => this.handleProductResults(results)) } this.getReviews() platform.ready().then(() => { const subscription = 
this.geolocation.watchPosition() .filter((p) => p.coords !== undefined) //Filter Out Errors .subscribe(position => { this.miLatitude = position.coords.latitude; this.miLongitude = position.coords.longitude; // console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude); }); }); this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal); //con esto antes obtenia el providerOneSignal // for (let i = 0; i < this.values.homerOneSignal.length; i++) { // if(this.values.homerOneSignal[i].product == this.product.product.id){ // this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal // } // } } loadDataProduct(){ //según el horario, deshabilitamos los dias de la semana que no están definidos en el Available this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6] this.product.product.availability.forEach(element => { let day = Number((element.type as string).split(':')[1]) console.log({ day }) const index = this.disableWeekDays.indexOf(day) if (index > -1) { this.disableWeekDays.splice(index, 1) } }) console.log('this.daysConfig', this.daysConfig) console.log('this.disableWeekDays', this.disableWeekDays) //Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante for (let index = 0; index < 180; index++) { let cur_day = moment() .add(index, 'days') .toDate() .getDay() const index_cur_day = this.disableWeekDays.indexOf(cur_day) if (index_cur_day > -1) { this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), disable: true, }) } this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), marked: true, }) } //Por defecto iniciamos con el booking deshabilitado this.disableSubmit = true } handleAddress(result){ this.addresses = result this.addressesCustomer = this.addresses.customer.billing_address.address_1 console.log(this.addressesCustomer) } handleCustomer(result){ this.customers = result } handleProductResults(results) { this.selectedService = null this.product.product = results this.id = results.id console.log('producto', this.product.product) this.options.product_id = this.id console.log('Product: ', this.product.product.resources_full) this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); // this.product = results // this.usedVariationAttributes = this.product.product.attributes.filter( // function(attribute) { // return attribute.variation == true // }, // ) } getProduct(id) { this.nav.push(ProductPage, id) console.log(id) } addToCart() { // if (!this.values.isLoggedIn) { // this.functions.showAlert( // 'Options', // 'Please login or create an account to continue', // ) // this.nav.push(AccountLogin) // } //Validamos se el producto contiene resources // if ( // this.product.product.resources_full.length > 0 && // !this.selectedService // ) { // this.functions.showAlert( // 'Options', // 'Select a service and booking information', // ) // return // } var resource_id = !this.selectedService ? null : this.selectedService.resource_id ? 
this.selectedService.resource_id : null
    this.getAddressFromCoords();
    var date = moment(this.date)
    var year = date.year()
    // moment months are zero-based and .day() is the weekday, so use
    // .month() + 1 and .date() to build a real calendar date
    var month = date.month() + 1
    var day = date.date()
    this.service.addOrders({
      "clientUi": this.values.customerId,
      "nameClient": this.values.customerName,
      "productUi": this.product.product.id,
      "productName": this.product.product.name,
      "date": year + '/' + month + '/' + day,
      "hour": this.hourInit,
      "lat": this.lat,
      "lng": this.long,
      "onesignal": this.values.userId,
      "location": this.addressesCustomer
    });
    this.service.sendNotification({
      "title": "Nueva solicitud",
      "content": `Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
      "onesignalid": this.providerOneSignal
    })
    this.disableSubmit = true
    this.BookNow = 'PleaseWait'
    // var date = new Date(this.selectedTime);
    this.product_slot.map(result => {
      if (this.product.product.id == result.product_id) {
        var date = new Date(new Date(result.date))
        var year = date.getFullYear()
        var month = date.getMonth() + 1
        var day = date.getDate()
        this.service
          .addToCart(
            resource_id,
            month,
            day,
            year,
            result.date,
            this.product.product,
          )
          .then(results => {
            console.log(results)
          })
        this.values.count += parseInt(this.quantity)
      }
    })
    this.disableSubmit = false
    this.BookNow = 'BookNow'
    this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
    this.returnHome()
    // }
  }

  showAlert(title, text) {
    let alert = this.alert.create({
      title: title,
      subTitle: text,
      buttons: ['OK'],
    });
    alert.present();
  }

  setVariations() {
    this.product.product.attributes.forEach(item => {
      if (item.selected) {
        this.options['variation[attribute_pa_' + item.name + ']'] = item.selected
      }
    })
    for (var i = 0; i < this.product.product.attributes.length; i++) {
      console.log(this.product.product.attributes[i].name)
      if (
        this.product.product.attributes[i].variation &&
        this.product.product.attributes[i].selected == undefined
      ) {
        this.functions.showAlert(
          'Options',
          'Please Select Product ' + this.product.product.attributes[i].name + ' Option',
        )
        return false
      }
    }
    return true
  }

  onSelect($event, id) {
    let date = new Date($event.time)
    console.log({ date })
    this.month = date.getUTCMonth() + 1 //months from 1-12
    this.day = date.getUTCDate()
    this.year = date.getUTCFullYear()
    // if we change the date, reset the available time blocks
    this.schedule = null
    this.selectedTime = null
    this.disableSubmit = true
    if (
      this.product.product.resources_full &&
      this.product.product.resources_full.length > 0 &&
      !this.selectedService
    ) {
      this.functions.showAlert('error', this.lan.pleaseSelect)
      return
    }
    var resource_id = !this.selectedService ? null : this.selectedService.resource_id ?
this.selectedService.resource_id : null // if (this.values.isLoggedIn) { this.service .getBlocks(this.day, this.month, this.year, id, resource_id) .then(results => { let res = results as string let find = '<li class="block"' let regex = new RegExp(find, 'g') res = res.replace( regex, '<li class="block" ng-click="selectSchedule()" ', ) console.log('schedule', res) var match = res.match(/data-value="(.*?)"/gi) if (!match) { this.schedule = null return } match.forEach((el, i, arr) => { arr[i] = el.replace('data-value=', '').replace(/"/g, '') }) this.schedule = match }) } update_blocks(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.blockslistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } updateCart(a) { console.log('a:', a) this.disableSubmit = false this.values.count += parseInt(this.quantity) this.BookNow = 'BookNow' this.returnHome() // this.getCart() } returnHome(){ this.nav.push(ProductsListPage); } getCart() { this.nav.parent.select(2); } mySlideOptions = { initialSlide: 1, loop: true, autoplay: 5800, pager: true, } getReviews() { this.service.getReviews(this.id).then(results => this.handleReview(results)) } handleReview(a) { this.reviews = a for (let item in this.reviews.product_reviews) { this.reviews.product_reviews[item].avatar = md5( this.reviews.product_reviews[item].reviewer_email, ) } } addToWishlist(id) { if (this.values.isLoggedIn) { this.service.addToWishlist(id).then(results => this.update(results)) } else { this.functions.showAlert( 'Warning', 'Debe iniciar sesión para agregar un servicio a la lista de deseos', ) } } update(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.wishlistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } removeFromWishlist(id) { this.values.wishlistId[id] = false this.service.deleteItem(id).then(results => this.updateWish(results, id)) } updateWish(results, id) { if (results.status == 'success') { this.values.wishlistId[id] = false } } chooseVariationOne(){ this.chooseVariation(this.optionss); } chooseVariation(option) { console.log(option); console.log(this.selectedService); if (this.selectedService) { this.selectedService = null this.product.product.price = this.product.product.minPrice } this.product.product.resources_full.forEach(item => { if (item.resource_id == option.resource_id) { this.selectedService = option this.product.product.price = this.selectedService.price this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } }) // this.product.product.variations.forEach(variation => { // var test = new Array(this.usedVariationAttributes.length) // test.fill(false) // this.usedVariationAttributes.forEach(attribute => { // if (variation.attributes.length == 0) { // this.options.variation_id = variation.id // this.product.product.in_stock = variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // } else { // variation.attributes.forEach((item, index) => { // if ( // attribute.selected && // item.name.toUpperCase() == attribute.name.toUpperCase() && // item.option.toUpperCase() == attribute.selected.toUpperCase() // ) { // test[index] = true // } // }) // if (test.every(v => v == true)) { // this.options.variation_id = variation.id // this.product.product.in_stock = 
variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // test.fill(false) // } // } // }) // }) } selectTime(time) { this.selectedTime = time this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } getTime(item) { return moment(item).format('hh:mm a') } ngOnInit() { this.translate.get(['Please select a service']).subscribe(translations => { this.lan.pleaseSelect = translations['Please select a service']; }); } getAddressFromCoords() { console.log("getAddressFromCoords "+this.miLatitude+" "+this.miLongitude); let options: NativeGeocoderOptions = { useLocale: true, maxResults: 5 }; this.nativeGeocoder.reverseGeocode(this.miLatitude, this.miLongitude, options) .then((result: NativeGeocoderReverseResult[]) => { console.log(JSON.stringify(result[0])) this.autocomplete.input = result[0].locality+', '+ result[0].administrativeArea+', '+ result[0].countryName; } ) .catch((error: any) =>{ this.address = "Address Not Available!"; console.log(error) }); this.lat = this.miLatitude.toString(); this.long = this.miLongitude.toString(); } getDate(date){
getTime1(time){ this.processHour = time } }
this.processDate = date }
identifier_body
product.ts
import { Component, ViewChild } from '@angular/core' import { NavController, NavParams, Content, AlertController, Platform } from 'ionic-angular' import { ProductService } from '../../providers/service/product-service' import { Values } from '../../providers/service/values' import { Functions } from '../../providers/service/functions' import { md5 } from './md5' import { CartPage } from '../cart/cart' import { AccountLogin } from '../account/login/login' import { CalendarComponentOptions, DayConfig } from 'ion2-calendar' import moment from 'moment' import { TranslateService } from '@ngx-translate/core' import { ProductsListPage } from '../products-list/products-list' import { OneSignal } from '@ionic-native/onesignal'; import { Geolocation } from '@ionic-native/geolocation/ngx'; import { NativeGeocoder, NativeGeocoderReverseResult, NativeGeocoderForwardResult, NativeGeocoderOptions } from '@ionic-native/native-geocoder'; import { Service } from '../../providers/service/service'; @Component({ templateUrl: 'product.html', }) export class ProductPage { @ViewChild(Content) content: Content providerOneSignal:any product: any = {} id: any type: any status: any options: any optionss: any opt: any message: any wishlist: any quantity: any reviews: any reviewForm: any nickname: any details: any BookNow: any disableSubmit: boolean = true wishlistIcon: boolean = false usedVariationAttributes: any = [] selectedService: any selectedTime: any mon: any = [] day: any month: any = 1 year: any disableWeekDays = [] daysConfig: DayConfig[] = [] optionsMulti: CalendarComponentOptions = { pickMode: 'single', daysConfig: this.daysConfig, disableWeeks: this.disableWeekDays, } schedule: any NoBlockAvailable = 'NoBlockAvailable' WhatTime = 'WhatTime' lan: any = {}; miLatitude = 0; miLongitude = 0; lat: string; long:string; autocomplete: { input: string; }; address:string; date:any = '2021-03-03'; hourInit:any = '08:00'; hourEnd:any = '22:00'; processDate:any; processHour:any; product_slot:any = []
addressesCustomer: any; constructor( public alert:AlertController, public translate: TranslateService, public nav: NavController, public service: ProductService, public servi:Service, public otherservice: Service, params: NavParams, public functions: Functions, public values: Values, private platform: Platform, private geolocation: Geolocation, private nativeGeocoder: NativeGeocoder, ) { console.log("prueba id onesignal", this.values.userId); this.lat = ''; this.long = ''; this.options = [] this.optionss = [] this.quantity = '1' this.BookNow = 'BookNow' this.otherservice.getCustomer() .then((results) => this.handleCustomer(results)); this.otherservice.getAddress() .then((resultsAddresses) => this.handleAddress(resultsAddresses)); if (params.data.id) { this.selectedService = null this.product.product = params.data.id this.id = params.data.id this.product_slot = params.data.product_sl; this.date = params.data.date; this.hourInit = params.data.hourInit; this.hourEnd = params.data.hourEnd; // this.selectedTime = this.date+'T'+this.hourInit this.options.product_id = this.id this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); } else { // this.options.product_id = this.id this.service .getProduct(params.data.id) .then(results => this.handleProductResults(results)) } this.getReviews() platform.ready().then(() => { const subscription = this.geolocation.watchPosition() .filter((p) => p.coords !== undefined) //Filter Out Errors .subscribe(position => { this.miLatitude = position.coords.latitude; this.miLongitude = position.coords.longitude; // console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude); }); }); this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal); //con esto antes obtenia el providerOneSignal // for (let i = 0; i < this.values.homerOneSignal.length; i++) { // if(this.values.homerOneSignal[i].product == this.product.product.id){ // this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal // } // } } loadDataProduct(){ //según el horario, deshabilitamos los dias de la semana que no están definidos en el Available this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6] this.product.product.availability.forEach(element => { let day = Number((element.type as string).split(':')[1]) console.log({ day }) const index = this.disableWeekDays.indexOf(day) if (index > -1) { this.disableWeekDays.splice(index, 1) } }) console.log('this.daysConfig', this.daysConfig) console.log('this.disableWeekDays', this.disableWeekDays) //Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante for (let index = 0; index < 180; index++) { let cur_day = moment() .add(index, 'days') .toDate() .getDay() const index_cur_day = this.disableWeekDays.indexOf(cur_day) if (index_cur_day > -1) { this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), disable: true, }) } this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), marked: true, }) } //Por defecto iniciamos con el booking deshabilitado this.disableSubmit = true } handleAddress(result){ this.addresses = result this.addressesCustomer = this.addresses.customer.billing_address.address_1 console.log(this.addressesCustomer) } handleCustomer(result){ this.customers = result } handleProductResults(results) { this.selectedService = null this.product.product = 
results
    this.id = results.id
    console.log('producto', this.product.product)
    this.options.product_id = this.id
    console.log('Product: ', this.product.product.resources_full)
    this.usedVariationAttributes = (this.product.product
      .resources_full as Array<any>).map(item => item)
    console.log('usedVariationAttributes:', this.usedVariationAttributes)
    this.loadDataProduct();
    // this.product = results
    // this.usedVariationAttributes = this.product.product.attributes.filter(
    //   function(attribute) {
    //     return attribute.variation == true
    //   },
    // )
  }

  getProduct(id) {
    this.nav.push(ProductPage, id)
    console.log(id)
  }

  addToCart() {
    // if (!this.values.isLoggedIn) {
    //   this.functions.showAlert(
    //     'Options',
    //     'Please login or create an account to continue',
    //   )
    //   this.nav.push(AccountLogin)
    // }
    // Validate whether the product contains resources
    // if (
    //   this.product.product.resources_full.length > 0 &&
    //   !this.selectedService
    // ) {
    //   this.functions.showAlert(
    //     'Options',
    //     'Select a service and booking information',
    //   )
    //   return
    // }
    var resource_id = !this.selectedService ? null :
      this.selectedService.resource_id ? this.selectedService.resource_id : null
    this.getAddressFromCoords();
    var date = moment(this.date)
    var year = date.year()
    // moment months are zero-based and .day() is the weekday, so use
    // .month() + 1 and .date() to build a real calendar date
    var month = date.month() + 1
    var day = date.date()
    this.service.addOrders({
      "clientUi": this.values.customerId,
      "nameClient": this.values.customerName,
      "productUi": this.product.product.id,
      "productName": this.product.product.name,
      "date": year + '/' + month + '/' + day,
      "hour": this.hourInit,
      "lat": this.lat,
      "lng": this.long,
      "onesignal": this.values.userId,
      "location": this.addressesCustomer
    });
    this.service.sendNotification({
      "title": "Nueva solicitud",
      "content": `Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
      "onesignalid": this.providerOneSignal
    })
    this.disableSubmit = true
    this.BookNow = 'PleaseWait'
    // var date = new Date(this.selectedTime);
    this.product_slot.map(result => {
      if (this.product.product.id == result.product_id) {
        var date = new Date(new Date(result.date))
        var year = date.getFullYear()
        var month = date.getMonth() + 1
        var day = date.getDate()
        this.service
          .addToCart(
            resource_id,
            month,
            day,
            year,
            result.date,
            this.product.product,
          )
          .then(results => {
            console.log(results)
          })
        this.values.count += parseInt(this.quantity)
      }
    })
    this.disableSubmit = false
    this.BookNow = 'BookNow'
    this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
    this.returnHome()
    // }
  }

  showAlert(title, text) {
    let alert = this.alert.create({
      title: title,
      subTitle: text,
      buttons: ['OK'],
    });
    alert.present();
  }

  setVariations() {
    this.product.product.attributes.forEach(item => {
      if (item.selected) {
        this.options['variation[attribute_pa_' + item.name + ']'] = item.selected
      }
    })
    for (var i = 0; i < this.product.product.attributes.length; i++) {
      console.log(this.product.product.attributes[i].name)
      if (
        this.product.product.attributes[i].variation &&
        this.product.product.attributes[i].selected == undefined
      ) {
        this.functions.showAlert(
          'Options',
          'Please Select Product ' + this.product.product.attributes[i].name + ' Option',
        )
        return false
      }
    }
    return true
  }

  onSelect($event, id) {
    let date = new Date($event.time)
    console.log({ date })
    this.month = date.getUTCMonth() + 1 //months from 1-12
    this.day = date.getUTCDate()
    this.year = date.getUTCFullYear()
    // if we change the date, reset the available time blocks
    this.schedule = null
    this.selectedTime = null
    this.disableSubmit = true
    if (
      this.product.product.resources_full &&
this.product.product.resources_full.length > 0 && !this.selectedService ) { this.functions.showAlert('error', this.lan.pleaseSelect) return } var resource_id = !this.selectedService ? null : this.selectedService.resource_id ? this.selectedService.resource_id : null // if (this.values.isLoggedIn) { this.service .getBlocks(this.day, this.month, this.year, id, resource_id) .then(results => { let res = results as string let find = '<li class="block"' let regex = new RegExp(find, 'g') res = res.replace( regex, '<li class="block" ng-click="selectSchedule()" ', ) console.log('schedule', res) var match = res.match(/data-value="(.*?)"/gi) if (!match) { this.schedule = null return } match.forEach((el, i, arr) => { arr[i] = el.replace('data-value=', '').replace(/"/g, '') }) this.schedule = match }) } update_blocks(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.blockslistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } updateCart(a) { console.log('a:', a) this.disableSubmit = false this.values.count += parseInt(this.quantity) this.BookNow = 'BookNow' this.returnHome() // this.getCart() } returnHome(){ this.nav.push(ProductsListPage); } getCart() { this.nav.parent.select(2); } mySlideOptions = { initialSlide: 1, loop: true, autoplay: 5800, pager: true, } getReviews() { this.service.getReviews(this.id).then(results => this.handleReview(results)) } handleReview(a) { this.reviews = a for (let item in this.reviews.product_reviews) { this.reviews.product_reviews[item].avatar = md5( this.reviews.product_reviews[item].reviewer_email, ) } } addToWishlist(id) { if (this.values.isLoggedIn) { this.service.addToWishlist(id).then(results => this.update(results)) } else { this.functions.showAlert( 'Warning', 'Debe iniciar sesión para agregar un servicio a la lista de deseos', ) } } update(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.wishlistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } removeFromWishlist(id) { this.values.wishlistId[id] = false this.service.deleteItem(id).then(results => this.updateWish(results, id)) } updateWish(results, id) { if (results.status == 'success') { this.values.wishlistId[id] = false } } chooseVariationOne(){ this.chooseVariation(this.optionss); } chooseVariation(option) { console.log(option); console.log(this.selectedService); if (this.selectedService) { this.selectedService = null this.product.product.price = this.product.product.minPrice } this.product.product.resources_full.forEach(item => { if (item.resource_id == option.resource_id) { this.selectedService = option this.product.product.price = this.selectedService.price this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } }) // this.product.product.variations.forEach(variation => { // var test = new Array(this.usedVariationAttributes.length) // test.fill(false) // this.usedVariationAttributes.forEach(attribute => { // if (variation.attributes.length == 0) { // this.options.variation_id = variation.id // this.product.product.in_stock = variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // } else { // variation.attributes.forEach((item, index) => { // if ( // attribute.selected && // item.name.toUpperCase() == attribute.name.toUpperCase() && // 
item.option.toUpperCase() == attribute.selected.toUpperCase() // ) { // test[index] = true // } // }) // if (test.every(v => v == true)) { // this.options.variation_id = variation.id // this.product.product.in_stock = variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // test.fill(false) // } // } // }) // }) } selectTime(time) { this.selectedTime = time this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } getTime(item) { return moment(item).format('hh:mm a') } ngOnInit() { this.translate.get(['Please select a service']).subscribe(translations => { this.lan.pleaseSelect = translations['Please select a service']; }); } getAddressFromCoords() { console.log("getAddressFromCoords "+this.miLatitude+" "+this.miLongitude); let options: NativeGeocoderOptions = { useLocale: true, maxResults: 5 }; this.nativeGeocoder.reverseGeocode(this.miLatitude, this.miLongitude, options) .then((result: NativeGeocoderReverseResult[]) => { console.log(JSON.stringify(result[0])) this.autocomplete.input = result[0].locality+', '+ result[0].administrativeArea+', '+ result[0].countryName; } ) .catch((error: any) =>{ this.address = "Address Not Available!"; console.log(error) }); this.lat = this.miLatitude.toString(); this.long = this.miLongitude.toString(); } getDate(date){ this.processDate = date } getTime1(time){ this.processHour = time } }
customers: any; addresses: any;
random_line_split
product.ts
import { Component, ViewChild } from '@angular/core' import { NavController, NavParams, Content, AlertController, Platform } from 'ionic-angular' import { ProductService } from '../../providers/service/product-service' import { Values } from '../../providers/service/values' import { Functions } from '../../providers/service/functions' import { md5 } from './md5' import { CartPage } from '../cart/cart' import { AccountLogin } from '../account/login/login' import { CalendarComponentOptions, DayConfig } from 'ion2-calendar' import moment from 'moment' import { TranslateService } from '@ngx-translate/core' import { ProductsListPage } from '../products-list/products-list' import { OneSignal } from '@ionic-native/onesignal'; import { Geolocation } from '@ionic-native/geolocation/ngx'; import { NativeGeocoder, NativeGeocoderReverseResult, NativeGeocoderForwardResult, NativeGeocoderOptions } from '@ionic-native/native-geocoder'; import { Service } from '../../providers/service/service'; @Component({ templateUrl: 'product.html', }) export class ProductPage { @ViewChild(Content) content: Content providerOneSignal:any product: any = {} id: any type: any status: any options: any optionss: any opt: any message: any wishlist: any quantity: any reviews: any reviewForm: any nickname: any details: any BookNow: any disableSubmit: boolean = true wishlistIcon: boolean = false usedVariationAttributes: any = [] selectedService: any selectedTime: any mon: any = [] day: any month: any = 1 year: any disableWeekDays = [] daysConfig: DayConfig[] = [] optionsMulti: CalendarComponentOptions = { pickMode: 'single', daysConfig: this.daysConfig, disableWeeks: this.disableWeekDays, } schedule: any NoBlockAvailable = 'NoBlockAvailable' WhatTime = 'WhatTime' lan: any = {}; miLatitude = 0; miLongitude = 0; lat: string; long:string; autocomplete: { input: string; }; address:string; date:any = '2021-03-03'; hourInit:any = '08:00'; hourEnd:any = '22:00'; processDate:any; processHour:any; product_slot:any = [] customers: any; addresses: any; addressesCustomer: any; constructor( public alert:AlertController, public translate: TranslateService, public nav: NavController, public service: ProductService, public servi:Service, public otherservice: Service, params: NavParams, public functions: Functions, public values: Values, private platform: Platform, private geolocation: Geolocation, private nativeGeocoder: NativeGeocoder, ) { console.log("prueba id onesignal", this.values.userId); this.lat = ''; this.long = ''; this.options = [] this.optionss = [] this.quantity = '1' this.BookNow = 'BookNow' this.otherservice.getCustomer() .then((results) => this.handleCustomer(results)); this.otherservice.getAddress() .then((resultsAddresses) => this.handleAddress(resultsAddresses)); if (params.data.id) { this.selectedService = null this.product.product = params.data.id this.id = params.data.id this.product_slot = params.data.product_sl; this.date = params.data.date; this.hourInit = params.data.hourInit; this.hourEnd = params.data.hourEnd; // this.selectedTime = this.date+'T'+this.hourInit this.options.product_id = this.id this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); } else { // this.options.product_id = this.id this.service .getProduct(params.data.id) .then(results => this.handleProductResults(results)) } this.getReviews() platform.ready().then(() => { const subscription = 
this.geolocation.watchPosition() .filter((p) => p.coords !== undefined) //Filter Out Errors .subscribe(position => { this.miLatitude = position.coords.latitude; this.miLongitude = position.coords.longitude; // console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude); }); }); this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal); //con esto antes obtenia el providerOneSignal // for (let i = 0; i < this.values.homerOneSignal.length; i++) { // if(this.values.homerOneSignal[i].product == this.product.product.id){ // this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal // } // } } loadDataProduct(){ //según el horario, deshabilitamos los dias de la semana que no están definidos en el Available this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6] this.product.product.availability.forEach(element => { let day = Number((element.type as string).split(':')[1]) console.log({ day }) const index = this.disableWeekDays.indexOf(day) if (index > -1) { this.disableWeekDays.splice(index, 1) } }) console.log('this.daysConfig', this.daysConfig) console.log('this.disableWeekDays', this.disableWeekDays) //Ponemos los dias as marked (para que aparezcan de un color azul) 6 meses hacia adelante for (let index = 0; index < 180; index++) { let cur_day = moment() .add(index, 'days') .toDate() .getDay() const index_cur_day = this.disableWeekDays.indexOf(cur_day) if (index_cur_day > -1) {
this.daysConfig.push({
        date: moment()
          .add(index, 'days')
          .toDate(),
        marked: true,
      })
    }
    // start with booking disabled by default
    this.disableSubmit = true
  }

  handleAddress(result) {
    this.addresses = result
    this.addressesCustomer = this.addresses.customer.billing_address.address_1
    console.log(this.addressesCustomer)
  }

  handleCustomer(result) {
    this.customers = result
  }

  handleProductResults(results) {
    this.selectedService = null
    this.product.product = results
    this.id = results.id
    console.log('producto', this.product.product)
    this.options.product_id = this.id
    console.log('Product: ', this.product.product.resources_full)
    this.usedVariationAttributes = (this.product.product
      .resources_full as Array<any>).map(item => item)
    console.log('usedVariationAttributes:', this.usedVariationAttributes)
    this.loadDataProduct();
    // this.product = results
    // this.usedVariationAttributes = this.product.product.attributes.filter(
    //   function(attribute) {
    //     return attribute.variation == true
    //   },
    // )
  }

  getProduct(id) {
    this.nav.push(ProductPage, id)
    console.log(id)
  }

  addToCart() {
    // if (!this.values.isLoggedIn) {
    //   this.functions.showAlert(
    //     'Options',
    //     'Please login or create an account to continue',
    //   )
    //   this.nav.push(AccountLogin)
    // }
    // Validate whether the product contains resources
    // if (
    //   this.product.product.resources_full.length > 0 &&
    //   !this.selectedService
    // ) {
    //   this.functions.showAlert(
    //     'Options',
    //     'Select a service and booking information',
    //   )
    //   return
    // }
    var resource_id = !this.selectedService ? null :
      this.selectedService.resource_id ? this.selectedService.resource_id : null
    this.getAddressFromCoords();
    var date = moment(this.date)
    var year = date.year()
    // moment months are zero-based and .day() is the weekday, so use
    // .month() + 1 and .date() to build a real calendar date
    var month = date.month() + 1
    var day = date.date()
    this.service.addOrders({
      "clientUi": this.values.customerId,
      "nameClient": this.values.customerName,
      "productUi": this.product.product.id,
      "productName": this.product.product.name,
      "date": year + '/' + month + '/' + day,
      "hour": this.hourInit,
      "lat": this.lat,
      "lng": this.long,
      "onesignal": this.values.userId,
      "location": this.addressesCustomer
    });
    this.service.sendNotification({
      "title": "Nueva solicitud",
      "content": `Usted ha recibido una solicitud de servicio de ${this.values.customerName}`,
      "onesignalid": this.providerOneSignal
    })
    this.disableSubmit = true
    this.BookNow = 'PleaseWait'
    // var date = new Date(this.selectedTime);
    this.product_slot.map(result => {
      if (this.product.product.id == result.product_id) {
        var date = new Date(new Date(result.date))
        var year = date.getFullYear()
        var month = date.getMonth() + 1
        var day = date.getDate()
        this.service
          .addToCart(
            resource_id,
            month,
            day,
            year,
            result.date,
            this.product.product,
          )
          .then(results => {
            console.log(results)
          })
        this.values.count += parseInt(this.quantity)
      }
    })
    this.disableSubmit = false
    this.BookNow = 'BookNow'
    this.showAlert('Solicitud enviada', '<strong>Exito:</strong> Has enviado una solicitud a tu homer correctamente');
    this.returnHome()
    // }
  }

  showAlert(title, text) {
    let alert = this.alert.create({
      title: title,
      subTitle: text,
      buttons: ['OK'],
    });
    alert.present();
  }

  setVariations() {
    this.product.product.attributes.forEach(item => {
      if (item.selected) {
        this.options['variation[attribute_pa_' + item.name + ']'] = item.selected
      }
    })
    for (var i = 0; i < this.product.product.attributes.length; i++) {
      console.log(this.product.product.attributes[i].name)
      if (
        this.product.product.attributes[i].variation &&
        this.product.product.attributes[i].selected == undefined
      ) {
        this.functions.showAlert(
          'Options',
          'Please Select Product ' + this.product.product.attributes[i].name + ' Option',
        )
        return false
      }
    }
    return true
  }

  onSelect($event, id) {
    let date = new Date($event.time)
    console.log({ date })
    this.month = date.getUTCMonth() + 1 //months from 1-12
    this.day = date.getUTCDate()
    this.year = date.getUTCFullYear()
    // if we change the date, reset the available time blocks
    this.schedule = null
    this.selectedTime = null
    this.disableSubmit = true
    if (
      this.product.product.resources_full &&
      this.product.product.resources_full.length > 0 &&
      !this.selectedService
    ) {
      this.functions.showAlert('error', this.lan.pleaseSelect)
      return
    }
    var resource_id = !this.selectedService ? null :
      this.selectedService.resource_id ? this.selectedService.resource_id : null
    // if (this.values.isLoggedIn) {
    this.service
      .getBlocks(this.day, this.month, this.year, id, resource_id)
      .then(results => {
        let res = results as string
        let find = '<li class="block"'
        let regex = new RegExp(find, 'g')
        res = res.replace(
          regex,
          '<li class="block" ng-click="selectSchedule()" ',
        )
        console.log('schedule', res)
        var match = res.match(/data-value="(.*?)"/gi)
        if (!match) {
          this.schedule = null
          return
        }
        match.forEach((el, i, arr) => {
          arr[i] = el.replace('data-value=', '').replace(/"/g, '')
        })
        this.schedule = match
      })
  }

  update_blocks(a) {
    if (a.success == 'Success') {
      //this.functions.showAlert(a.success, a.message);
      this.values.blockslistId[this.product.product.id] = true
    } else {
      this.functions.showAlert('error', 'error')
    }
  }

  updateCart(a) {
    console.log('a:', a)
    this.disableSubmit = false
    this.values.count += parseInt(this.quantity)
    this.BookNow = 'BookNow'
    this.returnHome()
    // this.getCart()
  }

  returnHome() {
    this.nav.push(ProductsListPage);
  }

  getCart() {
    this.nav.parent.select(2);
  }

  mySlideOptions = {
    initialSlide: 1,
    loop: true,
    autoplay: 5800,
    pager: true,
  }

  getReviews() {
    this.service.getReviews(this.id).then(results => this.handleReview(results))
  }

  handleReview(a) {
    this.reviews = a
    for (let item in this.reviews.product_reviews) {
      this.reviews.product_reviews[item].avatar = md5(
        this.reviews.product_reviews[item].reviewer_email,
      )
    }
  }

  addToWishlist(id) {
    if (this.values.isLoggedIn) {
      this.service.addToWishlist(id).then(results => this.update(results))
    } else {
      this.functions.showAlert(
        'Warning',
        'Debe iniciar sesión para agregar un servicio a la lista de deseos',
      )
    }
  }

  update(a) {
    if (a.success == 'Success') {
      //this.functions.showAlert(a.success, a.message);
      this.values.wishlistId[this.product.product.id] = true
    } else {
      this.functions.showAlert('error', 'error')
    }
  }

  removeFromWishlist(id) {
    this.values.wishlistId[id] = false
    this.service.deleteItem(id).then(results => this.updateWish(results, id))
  }

  updateWish(results, id) {
    if (results.status == 'success') {
      this.values.wishlistId[id] = false
    }
  }

  chooseVariationOne() {
    this.chooseVariation(this.optionss);
  }

  chooseVariation(option) {
    console.log(option);
    console.log(this.selectedService);
    if (this.selectedService) {
      this.selectedService = null
      this.product.product.price = this.product.product.minPrice
    }
    this.product.product.resources_full.forEach(item => {
      if (item.resource_id == option.resource_id) {
        this.selectedService = option
        this.product.product.price = this.selectedService.price
        this.disableSubmit =
          (this.product.product.resources_full.length > 0 && !this.selectedService) ||
          !this.selectedTime
      }
    })
    // this.product.product.variations.forEach(variation => {
    //   var test = new Array(this.usedVariationAttributes.length)
    //   test.fill(false)
    //   this.usedVariationAttributes.forEach(attribute => {
    //     if (variation.attributes.length == 0) {
    //       this.options.variation_id = variation.id
    //       this.product.product.in_stock = variation.in_stock
    //       this.product.product.price = variation.price
    //       this.product.product.sale_price = variation.sale_price
    //       this.product.product.regular_price = variation.regular_price
    //     } else {
    //       variation.attributes.forEach((item, index) => {
    //         if (
    //           attribute.selected &&
    //           item.name.toUpperCase() == attribute.name.toUpperCase() &&
    //           item.option.toUpperCase() == attribute.selected.toUpperCase()
    //         ) {
    //           test[index] = true
    //         }
    //       })
    //       if (test.every(v => v == true)) {
    //         this.options.variation_id = variation.id
    //         this.product.product.in_stock = variation.in_stock
    //         this.product.product.price = variation.price
    //         this.product.product.sale_price = variation.sale_price
    //         this.product.product.regular_price = variation.regular_price
    //         test.fill(false)
    //       }
    //     }
    //   })
    // })
  }

  selectTime(time) {
    this.selectedTime = time
    this.disableSubmit =
      (this.product.product.resources_full.length > 0 && !this.selectedService) ||
      !this.selectedTime
  }

  getTime(item) {
    return moment(item).format('hh:mm a')
  }

  ngOnInit() {
    this.translate.get(['Please select a service']).subscribe(translations => {
      this.lan.pleaseSelect = translations['Please select a service'];
    });
  }

  getAddressFromCoords() {
    console.log("getAddressFromCoords " + this.miLatitude + " " + this.miLongitude);
    let options: NativeGeocoderOptions = {
      useLocale: true,
      maxResults: 5
    };
    this.nativeGeocoder.reverseGeocode(this.miLatitude, this.miLongitude, options)
      .then((result: NativeGeocoderReverseResult[]) => {
        console.log(JSON.stringify(result[0]))
        this.autocomplete.input = result[0].locality + ', ' +
          result[0].administrativeArea + ', ' + result[0].countryName;
      })
      .catch((error: any) => {
        this.address = "Address Not Available!";
        console.log(error)
      });
    this.lat = this.miLatitude.toString();
    this.long = this.miLongitude.toString();
  }

  getDate(date) {
    this.processDate = date
  }

  getTime1(time) {
    this.processHour = time
  }
}
this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), disable: true, }) }
conditional_block
product.ts
import { Component, ViewChild } from '@angular/core' import { NavController, NavParams, Content, AlertController, Platform } from 'ionic-angular' import { ProductService } from '../../providers/service/product-service' import { Values } from '../../providers/service/values' import { Functions } from '../../providers/service/functions' import { md5 } from './md5' import { CartPage } from '../cart/cart' import { AccountLogin } from '../account/login/login' import { CalendarComponentOptions, DayConfig } from 'ion2-calendar' import moment from 'moment' import { TranslateService } from '@ngx-translate/core' import { ProductsListPage } from '../products-list/products-list' import { OneSignal } from '@ionic-native/onesignal'; import { Geolocation } from '@ionic-native/geolocation/ngx'; import { NativeGeocoder, NativeGeocoderReverseResult, NativeGeocoderForwardResult, NativeGeocoderOptions } from '@ionic-native/native-geocoder'; import { Service } from '../../providers/service/service'; @Component({ templateUrl: 'product.html', }) export class ProductPage { @ViewChild(Content) content: Content providerOneSignal:any product: any = {} id: any type: any status: any options: any optionss: any opt: any message: any wishlist: any quantity: any reviews: any reviewForm: any nickname: any details: any BookNow: any disableSubmit: boolean = true wishlistIcon: boolean = false usedVariationAttributes: any = [] selectedService: any selectedTime: any mon: any = [] day: any month: any = 1 year: any disableWeekDays = [] daysConfig: DayConfig[] = [] optionsMulti: CalendarComponentOptions = { pickMode: 'single', daysConfig: this.daysConfig, disableWeeks: this.disableWeekDays, } schedule: any NoBlockAvailable = 'NoBlockAvailable' WhatTime = 'WhatTime' lan: any = {}; miLatitude = 0; miLongitude = 0; lat: string; long:string; autocomplete: { input: string; }; address:string; date:any = '2021-03-03'; hourInit:any = '08:00'; hourEnd:any = '22:00'; processDate:any; processHour:any; product_slot:any = [] customers: any; addresses: any; addressesCustomer: any; constructor( public alert:AlertController, public translate: TranslateService, public nav: NavController, public service: ProductService, public servi:Service, public otherservice: Service, params: NavParams, public functions: Functions, public values: Values, private platform: Platform, private geolocation: Geolocation, private nativeGeocoder: NativeGeocoder, ) { console.log("onesignal id test", this.values.userId); this.lat = ''; this.long = ''; this.options = [] this.optionss = [] this.quantity = '1' this.BookNow = 'BookNow' this.otherservice.getCustomer() .then((results) => this.handleCustomer(results)); this.otherservice.getAddress() .then((resultsAddresses) => this.handleAddress(resultsAddresses)); if (params.data.id) { this.selectedService = null this.product.product = params.data.id this.id = params.data.id this.product_slot = params.data.product_sl; this.date = params.data.date; this.hourInit = params.data.hourInit; this.hourEnd = params.data.hourEnd; // this.selectedTime = this.date+'T'+this.hourInit this.options.product_id = this.id this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); } else { // this.options.product_id = this.id this.service .getProduct(params.data.id) .then(results => this.handleProductResults(results)) } this.getReviews() platform.ready().then(() => { const subscription =
this.geolocation.watchPosition() .filter((p) => p.coords !== undefined) //Filter Out Errors .subscribe(position => { this.miLatitude = position.coords.latitude; this.miLongitude = position.coords.longitude; // console.log("locomiLocation=" + position.coords.latitude + ' ' + position.coords.longitude); }); }); this.servi.getHomerOneSignal(this.product.product.id).then((result:any) => this.providerOneSignal = result.providers[0].onesignal); // this is how providerOneSignal used to be obtained // for (let i = 0; i < this.values.homerOneSignal.length; i++) { // if(this.values.homerOneSignal[i].product == this.product.product.id){ // this.providerOneSignal = this.values.homerOneSignal[i].providerOneSignal // } // } } loadDataProduct(){ // based on the schedule, disable the weekdays that are not defined in the availability this.disableWeekDays = [0, 1, 2, 3, 4, 5, 6] this.product.product.availability.forEach(element => { let day = Number((element.type as string).split(':')[1]) console.log({ day }) const index = this.disableWeekDays.indexOf(day) if (index > -1) { this.disableWeekDays.splice(index, 1) } }) console.log('this.daysConfig', this.daysConfig) console.log('this.disableWeekDays', this.disableWeekDays) // Mark the days (so they show up in blue) six months ahead for (let index = 0; index < 180; index++) { let cur_day = moment() .add(index, 'days') .toDate() .getDay() const index_cur_day = this.disableWeekDays.indexOf(cur_day) if (index_cur_day > -1) { this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), disable: true, }) } this.daysConfig.push({ date: moment() .add(index, 'days') .toDate(), marked: true, }) } // By default, start with booking disabled this.disableSubmit = true } handleAddress(result){ this.addresses = result this.addressesCustomer = this.addresses.customer.billing_address.address_1 console.log(this.addressesCustomer) } handleCustomer(result){ this.customers = result } handleProductResults(results) { this.selectedService = null this.product.product = results this.id = results.id console.log('product', this.product.product) this.options.product_id = this.id console.log('Product: ', this.product.product.resources_full) this.usedVariationAttributes = (this.product.product .resources_full as Array<any>).map(item => item) console.log('usedVariationAttributes:', this.usedVariationAttributes) this.loadDataProduct(); // this.product = results // this.usedVariationAttributes = this.product.product.attributes.filter( // function(attribute) { // return attribute.variation == true // }, // ) } getProduct(id) { this.nav.push(ProductPage, id) console.log(id) } addToCart() { // if (!this.values.isLoggedIn) { // this.functions.showAlert( // 'Options', // 'Please login or create an account to continue', // ) // this.nav.push(AccountLogin) // } // Validate whether the product contains resources // if ( // this.product.product.resources_full.length > 0 && // !this.selectedService // ) { // this.functions.showAlert( // 'Options', // 'Select a service and booking information', // ) // return // } var resource_id = !this.selectedService ? null : this.selectedService.resource_id ?
this.selectedService.resource_id : null this.getAddressFromCoords(); var date = moment(this.date) var year = date.year() var month = date.month() + 1 //moment months are 0-based var day = date.date() //.date() is day-of-month; .day() is the weekday this.service.addOrders({ "clientUi": this.values.customerId, "nameClient": this.values.customerName, "productUi": this.product.product.id, "productName": this.product.product.name, "date": year+'/'+month+'/'+day, "hour": this.hourInit, "lat":this.lat, "lng":this.long, "onesignal":this.values.userId, "location" : this.addressesCustomer }); this.service.sendNotification({ "title":"New request", "content":`You have received a service request from ${this.values.customerName}`, "onesignalid":this.providerOneSignal }) this.disableSubmit = true this.BookNow = 'PleaseWait' // var date = new Date(this.selectedTime); this.product_slot.map(result => { if(this.product.product.id == result.product_id) { var date = new Date(new Date(result.date)) var year = date.getFullYear() var month = date.getMonth() + 1 var day = date.getDate() this.service .addToCart( resource_id, month, day, year, result.date, this.product.product, ) .then(results => { console.log(results) }) this.values.count += parseInt(this.quantity) } }) this.disableSubmit = false this.BookNow = 'BookNow' this.showAlert('Request sent', '<strong>Success:</strong> You have sent a request to your homer successfully'); this.returnHome() // } } showAlert(title, text) { let alert = this.alert.create({ title: title, subTitle: text, buttons: ['OK'], }); alert.present(); } setVariations() { this.product.product.attributes.forEach(item => { if (item.selected) { this.options['variation[attribute_pa_' + item.name + ']'] = item.selected } }) for (var i = 0; i < this.product.product.attributes.length; i++) { console.log(this.product.product.attributes[i].name) if ( this.product.product.attributes[i].variation && this.product.product.attributes[i].selected == undefined ) { this.functions.showAlert( 'Options', 'Please Select Product ' + this.product.product.attributes[i].name + ' Option', ) return false } } return true } onSelect($event, id) { let date = new Date($event.time) console.log({ date }) this.month = date.getUTCMonth() + 1 //months from 1-12 this.day = date.getUTCDate() this.year = date.getUTCFullYear() // if the date changes, reset the time slots this.schedule = null this.selectedTime = null this.disableSubmit = true if ( this.product.product.resources_full && this.product.product.resources_full.length > 0 && !this.selectedService ) { this.functions.showAlert('error', this.lan.pleaseSelect) return } var resource_id = !this.selectedService ? null : this.selectedService.resource_id ?
this.selectedService.resource_id : null // if (this.values.isLoggedIn) { this.service .getBlocks(this.day, this.month, this.year, id, resource_id) .then(results => { let res = results as string let find = '<li class="block"' let regex = new RegExp(find, 'g') res = res.replace( regex, '<li class="block" ng-click="selectSchedule()" ', ) console.log('schedule', res) var match = res.match(/data-value="(.*?)"/gi) if (!match) { this.schedule = null return } match.forEach((el, i, arr) => { arr[i] = el.replace('data-value=', '').replace(/"/g, '') }) this.schedule = match }) } update_blocks(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.blockslistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } updateCart(a) { console.log('a:', a) this.disableSubmit = false this.values.count += parseInt(this.quantity) this.BookNow = 'BookNow' this.returnHome() // this.getCart() } returnHome(){ this.nav.push(ProductsListPage); } getCart() { this.nav.parent.select(2); } mySlideOptions = { initialSlide: 1, loop: true, autoplay: 5800, pager: true, } getReviews() { this.service.getReviews(this.id).then(results => this.handleReview(results)) } handleReview(a) { this.reviews = a for (let item in this.reviews.product_reviews) { this.reviews.product_reviews[item].avatar = md5( this.reviews.product_reviews[item].reviewer_email, ) } } addToWishlist(id) { if (this.values.isLoggedIn) { this.service.addToWishlist(id).then(results => this.update(results)) } else { this.functions.showAlert( 'Warning', 'You must log in to add a service to the wishlist', ) } } update(a) { if (a.success == 'Success') { //this.functions.showAlert(a.success, a.message); this.values.wishlistId[this.product.product.id] = true } else { this.functions.showAlert('error', 'error') } } removeFromWishlist(id) { this.values.wishlistId[id] = false this.service.deleteItem(id).then(results => this.updateWish(results, id)) } updateWish(results, id) { if (results.status == 'success') { this.values.wishlistId[id] = false } } cho
this.chooseVariation(this.optionss); } chooseVariation(option) { console.log(option); console.log(this.selectedService); if (this.selectedService) { this.selectedService = null this.product.product.price = this.product.product.minPrice } this.product.product.resources_full.forEach(item => { if (item.resource_id == option.resource_id) { this.selectedService = option this.product.product.price = this.selectedService.price this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } }) // this.product.product.variations.forEach(variation => { // var test = new Array(this.usedVariationAttributes.length) // test.fill(false) // this.usedVariationAttributes.forEach(attribute => { // if (variation.attributes.length == 0) { // this.options.variation_id = variation.id // this.product.product.in_stock = variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // } else { // variation.attributes.forEach((item, index) => { // if ( // attribute.selected && // item.name.toUpperCase() == attribute.name.toUpperCase() && // item.option.toUpperCase() == attribute.selected.toUpperCase() // ) { // test[index] = true // } // }) // if (test.every(v => v == true)) { // this.options.variation_id = variation.id // this.product.product.in_stock = variation.in_stock // this.product.product.price = variation.price // this.product.product.sale_price = variation.sale_price // this.product.product.regular_price = variation.regular_price // test.fill(false) // } // } // }) // }) } selectTime(time) { this.selectedTime = time this.disableSubmit = (this.product.product.resources_full.length > 0 && !this.selectedService) || !this.selectedTime } getTime(item) { return moment(item).format('hh:mm a') } ngOnInit() { this.translate.get(['Please select a service']).subscribe(translations => { this.lan.pleaseSelect = translations['Please select a service']; }); } getAddressFromCoords() { console.log("getAddressFromCoords "+this.miLatitude+" "+this.miLongitude); let options: NativeGeocoderOptions = { useLocale: true, maxResults: 5 }; this.nativeGeocoder.reverseGeocode(this.miLatitude, this.miLongitude, options) .then((result: NativeGeocoderReverseResult[]) => { console.log(JSON.stringify(result[0])) this.autocomplete.input = result[0].locality+', '+ result[0].administrativeArea+', '+ result[0].countryName; } ) .catch((error: any) =>{ this.address = "Address Not Available!"; console.log(error) }); this.lat = this.miLatitude.toString(); this.long = this.miLongitude.toString(); } getDate(date){ this.processDate = date } getTime1(time){ this.processHour = time } }
oseVariationOne(){
identifier_name
codegen.rs
use super::*; use crate::ops::cast::cast; use crate::ops::math::add; use crate::ops::matmul::lir_unary::{ AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec, }; use crate::ops::matmul::mir_quant::{ combine_scales, compensate_zero_points, requant, wire_offset_u8_as_i8, }; use crate::ops::matmul::pack::MatMatMulPack; use crate::ops::nn::{Reduce, Reducer}; pub enum AxesOrPatch<'a> { Axes(&'a Axis, &'a Axis, &'a Axis), Patch(TypedModelPatch), NotAMatMul(&'a Axis), } pub(crate) fn codegen( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<Option<TypedModelPatch>> { if (op.q_params.is_none() && node.inputs.len() != 2) || (op.q_params.is_some() && node.inputs.len() != 9) { return Ok(None); } let (m_axis, k_axis, n_axis) = match ensure_mkn_axes(op, model, node)? { AxesOrPatch::Axes(m, k, n) => (m, k, n), AxesOrPatch::Patch(p) => return Ok(Some(p)), AxesOrPatch::NotAMatMul(_) => return Ok(None), }; if op.q_params.is_none() { lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis)) .context("Translating to LirMatMul") } else { dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize") } } pub(super) fn ensure_mkn_axes<'a>( op: &'a EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<AxesOrPatch<'a>> { let input_facts = model.node_input_facts(node.id)?; let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect(); let output_shape = super::eval::output_shape(&op.axes, &input_shapes); let candidate_k_axes: TVec<&Axis> = op .axes .iter_all_axes() // Filter possible candidates (should be one time in each inputs but not in output) .filter(|a| { a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].len() == 0 && input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]] }) .collect(); let non_trivial_k_axis = candidate_k_axes .iter() .filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim()) .collect::<TVec<_>>(); let k_axis = if non_trivial_k_axis.len() > 1 { // TODO: handle case where multiple consecutive k in the same order in both input. 
bail!("Multiple k-axis candidate found"); } else { non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied() }; let Some(k_axis) = k_axis else { return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?)); }; let m_axis = op .axes .iter_all_axes() .filter(|a| { a.inputs[0].len() == 1 && (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one()) && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(m_axis) = m_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?)); }; let n_axis = op .axes .iter_all_axes() .filter(|a| { (a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one()) && a.inputs[1].len() == 1 && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(n_axis) = n_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis( op, model, node, true, &[k_axis, m_axis], )?)); }; for axis in op.axes.iter_all_axes() { let one = TDim::one(); let in_left = axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one; let in_right = axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one; let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one; if (in_left ^ in_right) && !in_out { return Ok(AxesOrPatch::NotAMatMul(axis)); } } Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis)) } pub(super) fn inject_k_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<TypedModelPatch> { let mut new_axes = op.axes.clone(); let name = &node.name; let mut patch = TypedModelPatch::new("inject k axis"); let mut wire = patch.taps(model, &node.inputs)?; let repr = new_axes.available_label(); new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency( repr, InOut::In(1), 0, )?; wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0]; wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } pub(super) fn inject_m_or_n_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, is_n: bool, exclude: &[&Axis], ) -> TractResult<TypedModelPatch> { let input_to_fix = is_n as usize; let label = if is_n { "n" } else { "m" }; let input_facts = model.node_input_facts(node.id)?; let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| { (a.inputs[1 - input_to_fix].len() == 0 || input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one()) && (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1) }); let name = &node.name; let mut patch = TypedModelPatch::new("Injecting m or n axis"); let mut wire = patch.taps(model, &node.inputs)?; if let Some(axis) = quasi_m_or_n_axis { if axis.inputs[input_to_fix].len() == 1 { let new_axes = op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } else { let new_axes = op .axes .clone() .with_extra_axis('$', InOut::In(input_to_fix), 0)? 
.linking(axis.repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; } } else { let repr = op.axes.available_label(); let new_axes = op .axes .clone() .with_extra_axis(repr, InOut::In(input_to_fix), 0)? .with_extra_axis('$', InOut::Out(0), 0)? .linking(repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } fn wire_axes_fix( patch: &mut TypedModelPatch, name: &str, var: &str, mapping: &AxesMapping, mut outlet: TVec<OutletId>, ) -> TractResult<TVec<OutletId>> { for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() { outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?; } Ok(outlet) } fn
( op: &EinSum, model: &TypedModel, node: &TypedNode, (_, k_axis, _): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let name = &node.name; let mut patch = TypedModelPatch::new("Dequantizing einsum"); let taps = patch.taps(model, &node.inputs)?; let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else { bail!("Expect exactly 9 inputs") }; if !patch.outlet_fact(a_scale)?.shape.volume().is_one() { let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0]; let output_rank = node.outputs[0].fact.rank(); for i in 1..(output_rank - q_axis_in_output) { a_scale = patch.wire_node( format!("{name}.a_scale_axis_fix_{i}"), AxisOp::Add(i), &[a_scale], )?[0]; } } let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?; let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?; let mut output = patch.wire_node( &node.name, EinSum { q_params: None, axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?, operating_dt: op.operating_dt, }, &[a, b], )?; let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0]; let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0]; let sum_a = patch.wire_node( format!("{name}.sum_a"), Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum), &[a_i32], )?; let sum_b = patch.wire_node( format!("{name}.sum_b"), Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum), &[b_i32], )?; let sum_a = wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?; let sum_b = wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?; let bias = tvec!(bias); let bias = wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?; let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?; output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?; let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone(); let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0]) .context("Zero point compensation")?; let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) } fn lir_mat_mul_unary( op: &EinSum, model: &TypedModel, node: &TypedNode, (m_axis, k_axis, n_axis): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let input_facts = model.node_input_facts(node.id)?; let a_m = m_axis.inputs[0][0]; let a_k = k_axis.inputs[0][0]; let b_n = n_axis.inputs[1][0]; let b_k = k_axis.inputs[1][0]; let c_m = m_axis.outputs[0][0]; let c_n = n_axis.outputs[0][0]; let m = &input_facts[0].shape[a_m]; let k = &input_facts[0].shape[a_k]; let n = &input_facts[1].shape[b_n]; if m < n { let expr = op .axes .iter_all_axes() .map(|axis| { let mut axis = axis.clone(); axis.inputs.swap(0, 1); axis }) .collect::<TVec<Axis>>(); return TypedModelPatch::replace_single_op( model, node, &[node.inputs[1], node.inputs[0]], EinSum { axes: AxesMapping::new(node.inputs.len(), 1, expr)?, ..op.clone() }, ) .map(Some); } let a_dt = input_facts[0].datum_type; let b_dt = input_facts[1].datum_type; let dt = op.operating_dt; let mmm = tract_linalg::ops() .mmm(a_dt, b_dt, dt, m.to_usize().ok(), k.to_usize().ok(), n.to_usize().ok()) .unwrap(); let name = &node.name; let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary"); let a = patch.tap_model(model, node.inputs[0])?; let b = 
patch.tap_model(model, node.inputs[1])?; let pack_a = MatMatMulPack { packer: mmm.a_pack(), k_axis: a_k, mn_axis: a_m }; let pack_b = MatMatMulPack { packer: mmm.b_pack(), k_axis: b_k, mn_axis: b_n }; let pa = patch.wire_node(format!("{name}.pack_a"), pack_a, &[a])?[0]; let pb = patch.wire_node(format!("{name}.pack_b"), pack_b, &[b])?[0]; let mut c_to_a_axis_mapping = tvec!(); let mut c_to_b_axis_mapping = tvec!(); for axis in op.axes.iter_all_axes().filter(|&axis| ![m_axis, k_axis, n_axis].contains(&axis)) { if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) { if input_facts[0].shape[a] != 1.to_dim() { let a = a - (a > a_m) as usize - (a > a_k) as usize; c_to_a_axis_mapping.push((c, a)); } } if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) { if input_facts[1].shape[b] != 1.to_dim() { let b = b - (b > b_n) as usize - (b > b_k) as usize; c_to_b_axis_mapping.push((c, b)); } } } let c_fact = op.output_facts(&input_facts)?.remove(0); let name = &node.name; let geo = AddMatMulGeometry { k: k.to_dim(), a_storage: None, b_storage: None, mmm: mmm.clone(), c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping), c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping), }; let output = unsafe { mmm.c_view(c_m, c_n) }; let lir = LirMatMulUnary::new( mmm, c_fact, c_m, c_n, vec![ProtoFusedSpec::AddMatMul(geo, 0, 1), ProtoFusedSpec::Store(output)], ) .context("Creating LirMatMulUnary")?; let output = patch.wire_node(name, lir, &[pa, pb])?[0]; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) }
dequant
identifier_name
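The dequant row above rewrites a quantized einsum as a raw i32 matmul plus zero-point compensation: since (a - a0)·(b - b0) = a·b - a0·Σb - b0·Σa + k·a0·b0, the sum_a/sum_b reductions wired in above let compensate_zero_points patch the result afterwards. A minimal self-contained sketch of that identity on a single dot product (toy function, not tract's API):

// Zero-point compensation identity used by the dequant rewrite, checked on a
// single dot product: (a - a0).(b - b0) == a.b - a0*sum(b) - b0*sum(a) + k*a0*b0
fn compensated_dot(a: &[i32], b: &[i32], a0: i32, b0: i32) -> i32 {
    let k = a.len() as i32;
    let raw: i32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let sum_a: i32 = a.iter().sum();
    let sum_b: i32 = b.iter().sum();
    raw - a0 * sum_b - b0 * sum_a + k * a0 * b0
}

fn main() {
    let (a, b) = ([12i32, 9, 7], [3i32, 5, 8]);
    let (a0, b0) = (10, 4);
    // Direct computation on the offset values must agree with the compensated form.
    let direct: i32 = a.iter().zip(b.iter()).map(|(x, y)| (x - a0) * (y - b0)).sum();
    assert_eq!(compensated_dot(&a, &b, a0, b0), direct);
    println!("dot = {direct}");
}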
codegen.rs
use super::*; use crate::ops::cast::cast; use crate::ops::math::add; use crate::ops::matmul::lir_unary::{ AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec, }; use crate::ops::matmul::mir_quant::{ combine_scales, compensate_zero_points, requant, wire_offset_u8_as_i8, }; use crate::ops::matmul::pack::MatMatMulPack; use crate::ops::nn::{Reduce, Reducer}; pub enum AxesOrPatch<'a> { Axes(&'a Axis, &'a Axis, &'a Axis), Patch(TypedModelPatch), NotAMatMul(&'a Axis), } pub(crate) fn codegen( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<Option<TypedModelPatch>> { if (op.q_params.is_none() && node.inputs.len() != 2) || (op.q_params.is_some() && node.inputs.len() != 9) { return Ok(None); } let (m_axis, k_axis, n_axis) = match ensure_mkn_axes(op, model, node)? { AxesOrPatch::Axes(m, k, n) => (m, k, n), AxesOrPatch::Patch(p) => return Ok(Some(p)), AxesOrPatch::NotAMatMul(_) => return Ok(None), }; if op.q_params.is_none() { lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis)) .context("Translating to LirMatMul") } else { dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize") } } pub(super) fn ensure_mkn_axes<'a>( op: &'a EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<AxesOrPatch<'a>> { let input_facts = model.node_input_facts(node.id)?; let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect(); let output_shape = super::eval::output_shape(&op.axes, &input_shapes); let candidate_k_axes: TVec<&Axis> = op .axes .iter_all_axes() // Filter possible candidates (should be one time in each inputs but not in output) .filter(|a| { a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].len() == 0 && input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]] }) .collect(); let non_trivial_k_axis = candidate_k_axes .iter() .filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim()) .collect::<TVec<_>>(); let k_axis = if non_trivial_k_axis.len() > 1
else { non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied() }; let Some(k_axis) = k_axis else { return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?)); }; let m_axis = op .axes .iter_all_axes() .filter(|a| { a.inputs[0].len() == 1 && (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one()) && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(m_axis) = m_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?)); }; let n_axis = op .axes .iter_all_axes() .filter(|a| { (a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one()) && a.inputs[1].len() == 1 && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(n_axis) = n_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis( op, model, node, true, &[k_axis, m_axis], )?)); }; for axis in op.axes.iter_all_axes() { let one = TDim::one(); let in_left = axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one; let in_right = axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one; let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one; if (in_left ^ in_right) && !in_out { return Ok(AxesOrPatch::NotAMatMul(axis)); } } Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis)) } pub(super) fn inject_k_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<TypedModelPatch> { let mut new_axes = op.axes.clone(); let name = &node.name; let mut patch = TypedModelPatch::new("inject k axis"); let mut wire = patch.taps(model, &node.inputs)?; let repr = new_axes.available_label(); new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency( repr, InOut::In(1), 0, )?; wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0]; wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } pub(super) fn inject_m_or_n_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, is_n: bool, exclude: &[&Axis], ) -> TractResult<TypedModelPatch> { let input_to_fix = is_n as usize; let label = if is_n { "n" } else { "m" }; let input_facts = model.node_input_facts(node.id)?; let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| { (a.inputs[1 - input_to_fix].len() == 0 || input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one()) && (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1) }); let name = &node.name; let mut patch = TypedModelPatch::new("Injecting m or n axis"); let mut wire = patch.taps(model, &node.inputs)?; if let Some(axis) = quasi_m_or_n_axis { if axis.inputs[input_to_fix].len() == 1 { let new_axes = op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } else { let new_axes = op .axes .clone() .with_extra_axis('$', InOut::In(input_to_fix), 0)? 
.linking(axis.repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; } } else { let repr = op.axes.available_label(); let new_axes = op .axes .clone() .with_extra_axis(repr, InOut::In(input_to_fix), 0)? .with_extra_axis('$', InOut::Out(0), 0)? .linking(repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } fn wire_axes_fix( patch: &mut TypedModelPatch, name: &str, var: &str, mapping: &AxesMapping, mut outlet: TVec<OutletId>, ) -> TractResult<TVec<OutletId>> { for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() { outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?; } Ok(outlet) } fn dequant( op: &EinSum, model: &TypedModel, node: &TypedNode, (_, k_axis, _): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let name = &node.name; let mut patch = TypedModelPatch::new("Dequantizing einsum"); let taps = patch.taps(model, &node.inputs)?; let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else { bail!("Expect exactly 9 inputs") }; if !patch.outlet_fact(a_scale)?.shape.volume().is_one() { let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0]; let output_rank = node.outputs[0].fact.rank(); for i in 1..(output_rank - q_axis_in_output) { a_scale = patch.wire_node( format!("{name}.a_scale_axis_fix_{i}"), AxisOp::Add(i), &[a_scale], )?[0]; } } let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?; let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?; let mut output = patch.wire_node( &node.name, EinSum { q_params: None, axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?, operating_dt: op.operating_dt, }, &[a, b], )?; let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0]; let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0]; let sum_a = patch.wire_node( format!("{name}.sum_a"), Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum), &[a_i32], )?; let sum_b = patch.wire_node( format!("{name}.sum_b"), Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum), &[b_i32], )?; let sum_a = wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?; let sum_b = wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?; let bias = tvec!(bias); let bias = wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?; let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?; output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?; let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone(); let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0]) .context("Zero point compensation")?; let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) } fn lir_mat_mul_unary( op: &EinSum, model: &TypedModel, node: &TypedNode, (m_axis, k_axis, n_axis): (&Axis, 
&Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let input_facts = model.node_input_facts(node.id)?; let a_m = m_axis.inputs[0][0]; let a_k = k_axis.inputs[0][0]; let b_n = n_axis.inputs[1][0]; let b_k = k_axis.inputs[1][0]; let c_m = m_axis.outputs[0][0]; let c_n = n_axis.outputs[0][0]; let m = &input_facts[0].shape[a_m]; let k = &input_facts[0].shape[a_k]; let n = &input_facts[1].shape[b_n]; if m < n { let expr = op .axes .iter_all_axes() .map(|axis| { let mut axis = axis.clone(); axis.inputs.swap(0, 1); axis }) .collect::<TVec<Axis>>(); return TypedModelPatch::replace_single_op( model, node, &[node.inputs[1], node.inputs[0]], EinSum { axes: AxesMapping::new(node.inputs.len(), 1, expr)?, ..op.clone() }, ) .map(Some); } let a_dt = input_facts[0].datum_type; let b_dt = input_facts[1].datum_type; let dt = op.operating_dt; let mmm = tract_linalg::ops() .mmm(a_dt, b_dt, dt, m.to_usize().ok(), k.to_usize().ok(), n.to_usize().ok()) .unwrap(); let name = &node.name; let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary"); let a = patch.tap_model(model, node.inputs[0])?; let b = patch.tap_model(model, node.inputs[1])?; let pack_a = MatMatMulPack { packer: mmm.a_pack(), k_axis: a_k, mn_axis: a_m }; let pack_b = MatMatMulPack { packer: mmm.b_pack(), k_axis: b_k, mn_axis: b_n }; let pa = patch.wire_node(format!("{name}.pack_a"), pack_a, &[a])?[0]; let pb = patch.wire_node(format!("{name}.pack_b"), pack_b, &[b])?[0]; let mut c_to_a_axis_mapping = tvec!(); let mut c_to_b_axis_mapping = tvec!(); for axis in op.axes.iter_all_axes().filter(|&axis| ![m_axis, k_axis, n_axis].contains(&axis)) { if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) { if input_facts[0].shape[a] != 1.to_dim() { let a = a - (a > a_m) as usize - (a > a_k) as usize; c_to_a_axis_mapping.push((c, a)); } } if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) { if input_facts[1].shape[b] != 1.to_dim() { let b = b - (b > b_n) as usize - (b > b_k) as usize; c_to_b_axis_mapping.push((c, b)); } } } let c_fact = op.output_facts(&input_facts)?.remove(0); let name = &node.name; let geo = AddMatMulGeometry { k: k.to_dim(), a_storage: None, b_storage: None, mmm: mmm.clone(), c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping), c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping), }; let output = unsafe { mmm.c_view(c_m, c_n) }; let lir = LirMatMulUnary::new( mmm, c_fact, c_m, c_n, vec![ProtoFusedSpec::AddMatMul(geo, 0, 1), ProtoFusedSpec::Store(output)], ) .context("Creating LirMatMulUnary")?; let output = patch.wire_node(name, lir, &[pa, pb])?[0]; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) }
{ // TODO: handle case where multiple consecutive k in the same order in both input. bail!("Multiple k-axis candidate found"); }
conditional_block
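The conditional block above guards k-axis selection in ensure_mkn_axes: a candidate reduction axis must occur exactly once in each input, not at all in the output, and have matching extents, and codegen bails when more than one non-trivial candidate survives. A standalone sketch of that filter over a simplified stand-in (ToyAxis and its fields are illustrative assumptions, not tract's real types):

// Simplified stand-in for an einsum axis: where it occurs in each input and in the output.
struct ToyAxis {
    repr: char,
    inputs: [Vec<usize>; 2],
    outputs: [Vec<usize>; 1],
}

// A k (reduction) candidate occurs exactly once in both inputs, never in the
// output, and the two occurrences have equal extents.
fn k_candidates<'a>(axes: &'a [ToyAxis], shapes: [&[usize]; 2]) -> Vec<&'a ToyAxis> {
    axes.iter()
        .filter(|a| {
            a.inputs[0].len() == 1
                && a.inputs[1].len() == 1
                && a.outputs[0].is_empty()
                && shapes[0][a.inputs[0][0]] == shapes[1][a.inputs[1][0]]
        })
        .collect()
}

fn main() {
    // "mk,kn->mn": m and n survive into the output, k is summed away.
    let axes = [
        ToyAxis { repr: 'm', inputs: [vec![0], vec![]], outputs: [vec![0]] },
        ToyAxis { repr: 'k', inputs: [vec![1], vec![0]], outputs: [vec![]] },
        ToyAxis { repr: 'n', inputs: [vec![], vec![1]], outputs: [vec![1]] },
    ];
    let cands = k_candidates(&axes, [&[2, 3], &[3, 4]]);
    assert_eq!(cands.len(), 1);
    println!("k axis: {}", cands[0].repr);
}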
codegen.rs
use super::*; use crate::ops::cast::cast; use crate::ops::math::add; use crate::ops::matmul::lir_unary::{ AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec, }; use crate::ops::matmul::mir_quant::{ combine_scales, compensate_zero_points, requant, wire_offset_u8_as_i8, }; use crate::ops::matmul::pack::MatMatMulPack; use crate::ops::nn::{Reduce, Reducer}; pub enum AxesOrPatch<'a> { Axes(&'a Axis, &'a Axis, &'a Axis), Patch(TypedModelPatch), NotAMatMul(&'a Axis), } pub(crate) fn codegen( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<Option<TypedModelPatch>> { if (op.q_params.is_none() && node.inputs.len() != 2) || (op.q_params.is_some() && node.inputs.len() != 9) { return Ok(None); } let (m_axis, k_axis, n_axis) = match ensure_mkn_axes(op, model, node)? { AxesOrPatch::Axes(m, k, n) => (m, k, n), AxesOrPatch::Patch(p) => return Ok(Some(p)), AxesOrPatch::NotAMatMul(_) => return Ok(None), }; if op.q_params.is_none() { lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis)) .context("Translating to LirMatMul") } else { dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize") } } pub(super) fn ensure_mkn_axes<'a>( op: &'a EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<AxesOrPatch<'a>>
pub(super) fn inject_k_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<TypedModelPatch> { let mut new_axes = op.axes.clone(); let name = &node.name; let mut patch = TypedModelPatch::new("inject k axis"); let mut wire = patch.taps(model, &node.inputs)?; let repr = new_axes.available_label(); new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency( repr, InOut::In(1), 0, )?; wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0]; wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } pub(super) fn inject_m_or_n_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, is_n: bool, exclude: &[&Axis], ) -> TractResult<TypedModelPatch> { let input_to_fix = is_n as usize; let label = if is_n { "n" } else { "m" }; let input_facts = model.node_input_facts(node.id)?; let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| { (a.inputs[1 - input_to_fix].len() == 0 || input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one()) && (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1) }); let name = &node.name; let mut patch = TypedModelPatch::new("Injecting m or n axis"); let mut wire = patch.taps(model, &node.inputs)?; if let Some(axis) = quasi_m_or_n_axis { if axis.inputs[input_to_fix].len() == 1 { let new_axes = op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } else { let new_axes = op .axes .clone() .with_extra_axis('$', InOut::In(input_to_fix), 0)? .linking(axis.repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; } } else { let repr = op.axes.available_label(); let new_axes = op .axes .clone() .with_extra_axis(repr, InOut::In(input_to_fix), 0)? .with_extra_axis('$', InOut::Out(0), 0)? 
.linking(repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } fn wire_axes_fix( patch: &mut TypedModelPatch, name: &str, var: &str, mapping: &AxesMapping, mut outlet: TVec<OutletId>, ) -> TractResult<TVec<OutletId>> { for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() { outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?; } Ok(outlet) } fn dequant( op: &EinSum, model: &TypedModel, node: &TypedNode, (_, k_axis, _): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let name = &node.name; let mut patch = TypedModelPatch::new("Dequantizing einsum"); let taps = patch.taps(model, &node.inputs)?; let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else { bail!("Expect exactly 9 inputs") }; if !patch.outlet_fact(a_scale)?.shape.volume().is_one() { let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0]; let output_rank = node.outputs[0].fact.rank(); for i in 1..(output_rank - q_axis_in_output) { a_scale = patch.wire_node( format!("{name}.a_scale_axis_fix_{i}"), AxisOp::Add(i), &[a_scale], )?[0]; } } let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?; let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?; let mut output = patch.wire_node( &node.name, EinSum { q_params: None, axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?, operating_dt: op.operating_dt, }, &[a, b], )?; let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0]; let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0]; let sum_a = patch.wire_node( format!("{name}.sum_a"), Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum), &[a_i32], )?; let sum_b = patch.wire_node( format!("{name}.sum_b"), Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum), &[b_i32], )?; let sum_a = wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?; let sum_b = wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?; let bias = tvec!(bias); let bias = wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?; let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?; output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?; let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone(); let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0]) .context("Zero point compensation")?; let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) } fn lir_mat_mul_unary( op: &EinSum, model: &TypedModel, node: &TypedNode, (m_axis, k_axis, n_axis): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let input_facts = model.node_input_facts(node.id)?; let a_m = m_axis.inputs[0][0]; let a_k = k_axis.inputs[0][0]; let b_n = n_axis.inputs[1][0]; let b_k = k_axis.inputs[1][0]; let c_m = m_axis.outputs[0][0]; let c_n = n_axis.outputs[0][0]; let m = &input_facts[0].shape[a_m]; let k = &input_facts[0].shape[a_k]; let n = 
&input_facts[1].shape[b_n]; if m < n { let expr = op .axes .iter_all_axes() .map(|axis| { let mut axis = axis.clone(); axis.inputs.swap(0, 1); axis }) .collect::<TVec<Axis>>(); return TypedModelPatch::replace_single_op( model, node, &[node.inputs[1], node.inputs[0]], EinSum { axes: AxesMapping::new(node.inputs.len(), 1, expr)?, ..op.clone() }, ) .map(Some); } let a_dt = input_facts[0].datum_type; let b_dt = input_facts[1].datum_type; let dt = op.operating_dt; let mmm = tract_linalg::ops() .mmm(a_dt, b_dt, dt, m.to_usize().ok(), k.to_usize().ok(), n.to_usize().ok()) .unwrap(); let name = &node.name; let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary"); let a = patch.tap_model(model, node.inputs[0])?; let b = patch.tap_model(model, node.inputs[1])?; let pack_a = MatMatMulPack { packer: mmm.a_pack(), k_axis: a_k, mn_axis: a_m }; let pack_b = MatMatMulPack { packer: mmm.b_pack(), k_axis: b_k, mn_axis: b_n }; let pa = patch.wire_node(format!("{name}.pack_a"), pack_a, &[a])?[0]; let pb = patch.wire_node(format!("{name}.pack_b"), pack_b, &[b])?[0]; let mut c_to_a_axis_mapping = tvec!(); let mut c_to_b_axis_mapping = tvec!(); for axis in op.axes.iter_all_axes().filter(|&axis| ![m_axis, k_axis, n_axis].contains(&axis)) { if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) { if input_facts[0].shape[a] != 1.to_dim() { let a = a - (a > a_m) as usize - (a > a_k) as usize; c_to_a_axis_mapping.push((c, a)); } } if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) { if input_facts[1].shape[b] != 1.to_dim() { let b = b - (b > b_n) as usize - (b > b_k) as usize; c_to_b_axis_mapping.push((c, b)); } } } let c_fact = op.output_facts(&input_facts)?.remove(0); let name = &node.name; let geo = AddMatMulGeometry { k: k.to_dim(), a_storage: None, b_storage: None, mmm: mmm.clone(), c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping), c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping), }; let output = unsafe { mmm.c_view(c_m, c_n) }; let lir = LirMatMulUnary::new( mmm, c_fact, c_m, c_n, vec![ProtoFusedSpec::AddMatMul(geo, 0, 1), ProtoFusedSpec::Store(output)], ) .context("Creating LirMatMulUnary")?; let output = patch.wire_node(name, lir, &[pa, pb])?[0]; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) }
{ let input_facts = model.node_input_facts(node.id)?; let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect(); let output_shape = super::eval::output_shape(&op.axes, &input_shapes); let candidate_k_axes: TVec<&Axis> = op .axes .iter_all_axes() // Filter possible candidates (should be one time in each inputs but not in output) .filter(|a| { a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].len() == 0 && input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]] }) .collect(); let non_trivial_k_axis = candidate_k_axes .iter() .filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim()) .collect::<TVec<_>>(); let k_axis = if non_trivial_k_axis.len() > 1 { // TODO: handle case where multiple consecutive k in the same order in both input. bail!("Multiple k-axis candidate found"); } else { non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied() }; let Some(k_axis) = k_axis else { return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?)); }; let m_axis = op .axes .iter_all_axes() .filter(|a| { a.inputs[0].len() == 1 && (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one()) && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(m_axis) = m_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?)); }; let n_axis = op .axes .iter_all_axes() .filter(|a| { (a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one()) && a.inputs[1].len() == 1 && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(n_axis) = n_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis( op, model, node, true, &[k_axis, m_axis], )?)); }; for axis in op.axes.iter_all_axes() { let one = TDim::one(); let in_left = axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one; let in_right = axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one; let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one; if (in_left ^ in_right) && !in_out { return Ok(AxesOrPatch::NotAMatMul(axis)); } } Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis)) }
identifier_body
codegen.rs
use super::*; use crate::ops::cast::cast; use crate::ops::math::add; use crate::ops::matmul::lir_unary::{ AddMatMulGeometry, LirMatMulUnary, MapOutputAxisToInput, ProtoFusedSpec, }; use crate::ops::matmul::mir_quant::{ combine_scales, compensate_zero_points, requant, wire_offset_u8_as_i8, }; use crate::ops::matmul::pack::MatMatMulPack; use crate::ops::nn::{Reduce, Reducer}; pub enum AxesOrPatch<'a> { Axes(&'a Axis, &'a Axis, &'a Axis), Patch(TypedModelPatch), NotAMatMul(&'a Axis), } pub(crate) fn codegen( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<Option<TypedModelPatch>> { if (op.q_params.is_none() && node.inputs.len() != 2) || (op.q_params.is_some() && node.inputs.len() != 9) { return Ok(None); } let (m_axis, k_axis, n_axis) = match ensure_mkn_axes(op, model, node)? { AxesOrPatch::Axes(m, k, n) => (m, k, n), AxesOrPatch::Patch(p) => return Ok(Some(p)), AxesOrPatch::NotAMatMul(_) => return Ok(None), }; if op.q_params.is_none() { lir_mat_mul_unary(op, model, node, (m_axis, k_axis, n_axis)) .context("Translating to LirMatMul") } else { dequant(op, model, node, (m_axis, k_axis, n_axis)).context("Dequantize") } } pub(super) fn ensure_mkn_axes<'a>( op: &'a EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<AxesOrPatch<'a>> { let input_facts = model.node_input_facts(node.id)?; let input_shapes: TVec<&[TDim]> = input_facts.iter().map(|f| &*f.shape).collect(); let output_shape = super::eval::output_shape(&op.axes, &input_shapes); let candidate_k_axes: TVec<&Axis> = op .axes .iter_all_axes() // Filter possible candidates (should be one time in each inputs but not in output) .filter(|a| { a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].len() == 0 && input_facts[0].shape[a.inputs[0][0]] == input_facts[1].shape[a.inputs[1][0]] }) .collect(); let non_trivial_k_axis = candidate_k_axes .iter() .filter(|a| input_facts[0].shape[a.inputs[0][0]] > 1.to_dim()) .collect::<TVec<_>>(); let k_axis = if non_trivial_k_axis.len() > 1 { // TODO: handle case where multiple consecutive k in the same order in both input. 
bail!("Multiple k-axis candidate found"); } else { non_trivial_k_axis.get(0).copied().or_else(|| candidate_k_axes.get(0)).copied() }; let Some(k_axis) = k_axis else { return Ok(AxesOrPatch::Patch(inject_k_axis(op, model, node)?)); }; let m_axis = op .axes .iter_all_axes() .filter(|a| { a.inputs[0].len() == 1 && (a.inputs[1].len() == 0 || input_facts[1].shape[a.inputs[1][0]].is_one()) && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(m_axis) = m_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis(op, model, node, false, &[k_axis])?)); }; let n_axis = op .axes .iter_all_axes() .filter(|a| { (a.inputs[0].len() == 0 || input_facts[0].shape[a.inputs[0][0]].is_one()) && a.inputs[1].len() == 1 && a.outputs[0].len() == 1 }) .max_by_key(|a| &output_shape[a.outputs[0][0]]); let Some(n_axis) = n_axis else { return Ok(AxesOrPatch::Patch(inject_m_or_n_axis( op, model, node, true, &[k_axis, m_axis], )?)); }; for axis in op.axes.iter_all_axes() { let one = TDim::one(); let in_left = axis.inputs[0].first().map(|pos| &input_facts[0].shape[*pos]).unwrap_or(&one) != &one; let in_right = axis.inputs[1].first().map(|pos| &input_facts[1].shape[*pos]).unwrap_or(&one) != &one; let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one; if (in_left ^ in_right) && !in_out { return Ok(AxesOrPatch::NotAMatMul(axis)); } } Ok(AxesOrPatch::Axes(m_axis, k_axis, n_axis)) } pub(super) fn inject_k_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, ) -> TractResult<TypedModelPatch> { let mut new_axes = op.axes.clone(); let name = &node.name; let mut patch = TypedModelPatch::new("inject k axis"); let mut wire = patch.taps(model, &node.inputs)?; let repr = new_axes.available_label(); new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency( repr, InOut::In(1), 0, )?; wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0]; wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } pub(super) fn inject_m_or_n_axis( op: &EinSum, model: &TypedModel, node: &TypedNode, is_n: bool, exclude: &[&Axis], ) -> TractResult<TypedModelPatch> { let input_to_fix = is_n as usize; let label = if is_n { "n" } else { "m" }; let input_facts = model.node_input_facts(node.id)?; let quasi_m_or_n_axis = op.axes.iter_all_axes().filter(|a| !exclude.contains(a)).find(|a| { (a.inputs[1 - input_to_fix].len() == 0 || input_facts[1 - input_to_fix].shape[a.inputs[1 - input_to_fix][0]].is_one()) && (a.inputs[input_to_fix].len() == 1 || a.outputs[0].len() == 1) }); let name = &node.name; let mut patch = TypedModelPatch::new("Injecting m or n axis"); let mut wire = patch.taps(model, &node.inputs)?; if let Some(axis) = quasi_m_or_n_axis { if axis.inputs[input_to_fix].len() == 1 { let new_axes = op.axes.clone().with_extra_axis('$', InOut::Out(0), 0)?.linking(axis.repr, '$')?; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } else { let new_axes = op .axes .clone() .with_extra_axis('$', InOut::In(input_to_fix), 0)? 
.linking(axis.repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?; } } else { let repr = op.axes.available_label(); let new_axes = op .axes .clone() .with_extra_axis(repr, InOut::In(input_to_fix), 0)? .with_extra_axis('$', InOut::Out(0), 0)? .linking(repr, '$')?; wire[input_to_fix] = patch.wire_node( format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]], )?[0]; wire = patch.wire_node( format!("{name}.einsum"), EinSum { axes: new_axes, ..op.clone() }, &wire, )?; wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?; } patch.shunt_outside(model, node.id.into(), wire[0])?; Ok(patch) } fn wire_axes_fix( patch: &mut TypedModelPatch, name: &str, var: &str, mapping: &AxesMapping, mut outlet: TVec<OutletId>, ) -> TractResult<TVec<OutletId>> { for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() { outlet = patch.wire_node(format!("{name}.fix_{var}.{ix})"), axis_op, &outlet)?; } Ok(outlet) } fn dequant( op: &EinSum, model: &TypedModel, node: &TypedNode, (_, k_axis, _): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let name = &node.name; let mut patch = TypedModelPatch::new("Dequantizing einsum"); let taps = patch.taps(model, &node.inputs)?; let [a, b, bias, mut a0, mut a_scale, mut b0, b_scale, c0, c_scale] = *taps else { bail!("Expect exactly 9 inputs") }; if !patch.outlet_fact(a_scale)?.shape.volume().is_one() { let q_axis_in_output = op.axes.axis((InOut::In(4), 0))?.outputs[0][0]; let output_rank = node.outputs[0].fact.rank();
)?[0]; } } let a = wire_offset_u8_as_i8(&mut patch, &node.name, a, "a", &mut a0, "a0")?; let b = wire_offset_u8_as_i8(&mut patch, &node.name, b, "b", &mut b0, "b0")?; let mut output = patch.wire_node( &node.name, EinSum { q_params: None, axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?, operating_dt: op.operating_dt, }, &[a, b], )?; let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0]; let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0]; let sum_a = patch.wire_node( format!("{name}.sum_a"), Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum), &[a_i32], )?; let sum_b = patch.wire_node( format!("{name}.sum_b"), Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum), &[b_i32], )?; let sum_a = wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?; let sum_b = wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?; let bias = tvec!(bias); let bias = wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?; let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?; output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?; let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone(); let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0]) .context("Zero point compensation")?; let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) } fn lir_mat_mul_unary( op: &EinSum, model: &TypedModel, node: &TypedNode, (m_axis, k_axis, n_axis): (&Axis, &Axis, &Axis), ) -> TractResult<Option<TypedModelPatch>> { let input_facts = model.node_input_facts(node.id)?; let a_m = m_axis.inputs[0][0]; let a_k = k_axis.inputs[0][0]; let b_n = n_axis.inputs[1][0]; let b_k = k_axis.inputs[1][0]; let c_m = m_axis.outputs[0][0]; let c_n = n_axis.outputs[0][0]; let m = &input_facts[0].shape[a_m]; let k = &input_facts[0].shape[a_k]; let n = &input_facts[1].shape[b_n]; if m < n { let expr = op .axes .iter_all_axes() .map(|axis| { let mut axis = axis.clone(); axis.inputs.swap(0, 1); axis }) .collect::<TVec<Axis>>(); return TypedModelPatch::replace_single_op( model, node, &[node.inputs[1], node.inputs[0]], EinSum { axes: AxesMapping::new(node.inputs.len(), 1, expr)?, ..op.clone() }, ) .map(Some); } let a_dt = input_facts[0].datum_type; let b_dt = input_facts[1].datum_type; let dt = op.operating_dt; let mmm = tract_linalg::ops() .mmm(a_dt, b_dt, dt, m.to_usize().ok(), k.to_usize().ok(), n.to_usize().ok()) .unwrap(); let name = &node.name; let mut patch = TypedModelPatch::new("Einsum to LirMatMulUnary"); let a = patch.tap_model(model, node.inputs[0])?; let b = patch.tap_model(model, node.inputs[1])?; let pack_a = MatMatMulPack { packer: mmm.a_pack(), k_axis: a_k, mn_axis: a_m }; let pack_b = MatMatMulPack { packer: mmm.b_pack(), k_axis: b_k, mn_axis: b_n }; let pa = patch.wire_node(format!("{name}.pack_a"), pack_a, &[a])?[0]; let pb = patch.wire_node(format!("{name}.pack_b"), pack_b, &[b])?[0]; let mut c_to_a_axis_mapping = tvec!(); let mut c_to_b_axis_mapping = tvec!(); for axis in op.axes.iter_all_axes().filter(|&axis| ![m_axis, k_axis, n_axis].contains(&axis)) { if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) { if input_facts[0].shape[a] != 1.to_dim() { let a = a - (a > a_m) as usize - (a > a_k) as usize; c_to_a_axis_mapping.push((c, 
a)); } } if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) { if input_facts[1].shape[b] != 1.to_dim() { let b = b - (b > b_n) as usize - (b > b_k) as usize; c_to_b_axis_mapping.push((c, b)); } } } let c_fact = op.output_facts(&input_facts)?.remove(0); let name = &node.name; let geo = AddMatMulGeometry { k: k.to_dim(), a_storage: None, b_storage: None, mmm: mmm.clone(), c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping), c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping), }; let output = unsafe { mmm.c_view(c_m, c_n) }; let lir = LirMatMulUnary::new( mmm, c_fact, c_m, c_n, vec![ProtoFusedSpec::AddMatMul(geo, 0, 1), ProtoFusedSpec::Store(output)], ) .context("Creating LirMatMulUnary")?; let output = patch.wire_node(name, lir, &[pa, pb])?[0]; patch.shunt_outside(model, node.id.into(), output)?; Ok(Some(patch)) }
for i in 1..(output_rank - q_axis_in_output) { a_scale = patch.wire_node( format!("{name}.a_scale_axis_fix_{i}"), AxisOp::Add(i), &[a_scale],
random_line_split
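The `dequant` rewrite in the Rust record above expands a quantized einsum into an integer matmul plus corrections: both operands are cast to i32, each is summed along the shared k axis (`sum_a`, `sum_b`), and those sums feed `compensate_zero_points`. The identity it relies on is Σ_k (a−a0)(b−b0) = Σ_k ab − a0·Σ_k b − b0·Σ_k a + k·a0·b0. Below is a minimal NumPy sketch of that compensation, checked against the naive dequantize-then-multiply form; the names are illustrative and are not tract's API.

```python
# Zero-point compensation for a quantized matmul:
# sum_k (a - a0)(b - b0) = sum_k a*b - a0*sum_k(b) - b0*sum_k(a) + k*a0*b0.
# Illustrative sketch only, not tract's API.
import numpy as np

def dequant_matmul(a, b, a0, b0, a_scale, b_scale):
    """a: (m, k) int8, b: (k, n) int8; a0/b0 are zero points."""
    a32 = a.astype(np.int32)
    b32 = b.astype(np.int32)
    k = a.shape[1]
    raw = a32 @ b32                          # integer accumulation
    sum_a = a32.sum(axis=1, keepdims=True)   # (m, 1), like Reduce::Sum on k
    sum_b = b32.sum(axis=0, keepdims=True)   # (1, n)
    compensated = raw - a0 * sum_b - b0 * sum_a + k * a0 * b0
    return compensated * (a_scale * b_scale)  # combine the two scales

# Sanity check against dequantizing first and multiplying in float.
rng = np.random.default_rng(0)
a = rng.integers(-128, 127, (3, 5), dtype=np.int8)
b = rng.integers(-128, 127, (5, 4), dtype=np.int8)
a0, b0, a_scale, b_scale = 3, -2, 0.5, 0.25
naive = ((a.astype(np.int32) - a0) * a_scale) @ ((b.astype(np.int32) - b0) * b_scale)
assert np.allclose(dequant_matmul(a, b, a0, b0, a_scale, b_scale), naive)
```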
interface.py
# Interfaces to the Ceilometer API # import ceilometer # Brings in HTTP support import requests import json import urllib from copy import copy from collections import defaultdict # import datetime # Provides authentication against Openstack from keystoneclient.v2_0 import client as KeystoneClient # Provides hooks to ceilometer, which we need for data. from ceilometerclient.v2.client import Client as ceilometer # from .models import usage from .models import Session, usage from sqlalchemy import create_engine # from .models.usage import Usage from .models import resources, tenants, usage # from .models.tenants import Tenant # Date format Ceilometer uses # 2013-07-03T13:34:17 # which is, as an strftime: # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S.%f") # or # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S") # Most of the time we use date_format date_format = "%Y-%m-%dT%H:%M:%S" # Sometimes things also have milliseconds, so we look for that too. # Because why not be annoying in all the ways? other_date_format = "%Y-%m-%dT%H:%M:%S.%f" def get_meter(meter, start, end, auth): # Meter is a href; in this case, it has a set of fields with it already. # print meter.link # print dir(meter) date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) } ] fields = [] for field in date_fields: fields.append( ("q.field", field["field"]) ) fields.append( ("q.op", field["op"]) ) fields.append( ("q.value", field["value"])) # Combine. url = "&".join((meter.link, urllib.urlencode(fields) )) r = requests.get( meter.link, headers={ "X-Auth-Token": auth, "Content-Type":"application/json"} ) return json.loads(r.text) class NotFound(BaseException): pass class keystone(KeystoneClient.Client): def tenant_by_name(self, name): authenticator = self.auth_url url = "%(url)s/tenants?%(query)s" % { "url": authenticator, "query": urllib.urlencode({"name":name}) } r = requests.get(url, headers={ "X-Auth-Token": self.auth_token, "Content-Type": "application/json" }) if r.ok: data = json.loads(r.text) assert data return data else: if r.status_code == 404: # couldn't find it raise NotFound class Artifice(object): """Produces billable artifacts""" def __init__(self, config): super(Artifice, self).__init__() self.config = config # This is the Keystone client connection, which provides our # OpenStack authentication self.auth = keystone( username= config["openstack"]["username"], password= config["openstack"]["password"], tenant_name= config["openstack"]["default_tenant"], auth_url= config["openstack"]["authentication_url"] ) conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % { "username": config["database"]["username"], "password": config["database"]["password"], "host": config["database"]["host"], "port": config["database"]["port"], "database": config["database"]["database"] } engine = create_engine(conn_string) Session.configure(bind=engine) self.session = Session() self.artifice = None self.ceilometer = ceilometer( self.config["ceilometer"]["host"], # Uses a lambda as ceilometer apparently wants to use it as a callable? token=lambda: self.auth.auth_token ) self._tenancy = None def host_to_dc(self, host): """ :param host: The name to use. :type host: str. :returns: str -- The datacenter corresponding to this host. """ # :raises: AttributeError, KeyError # How does this get implemented ? Should there be a module injection? 
return host # For the moment, passthrough # TODO: FIXME. def tenant(self, name): """ Returns a Tenant object describing the specified Tenant by name, or raises a NotFound error. """ # Returns a Tenant object for the given name. # Uses Keystone API to perform a direct name lookup, # as this is expected to work via name. data = self.auth.tenant_by_name(name) t = Tenant(data["tenant"], self) return t @property def tenants(self): """All the tenants in our system""" # print "tenant list is %s" % self.auth.tenants.list() if not self._tenancy:
return self._tenancy class Tenant(object): def __init__(self, tenant, conn): self.tenant = tenant # Conn is the niceometer object we were instanced from self.conn = conn self._meters = set() self._resources = None self.invoice_type = None # Invoice type needs to get set from the config, which is # part of the Artifice setup above. def __getitem__(self, item): try: return getattr(self.tenant, item) except AttributeError: try: return self.tenant[item] except KeyError: raise KeyError("No such key '%s' in tenant" % item) def __getattr__(self, attr): if attr not in self.tenant: return object.__getattribute__(self, attr) # return super(Tenant, self).__getattr__(attr) return self.tenant[attr] def invoice(self, start, end): """ Creates a new Invoice. Invoices are an Artifice datamodel that represent a set of billable entries assigned to a client on a given Date. An Invoice offers very little of its own opinions, requiring a backend plugin to operate. @returns: invoice """ if self.invoice_type is None: invoice_type = self.conn.config["main"]["invoice:object"] if ":" not in invoice_type: raise AttributeError("Invoice configuration incorrect! %s" % invoice_type) module, call = invoice_type.split(":") _package = __import__(module, globals(), locals(), [ call ]) funct = getattr(_package, call) self.invoice_type = funct config = self.conn.config["invoice_object"] invoice = self.invoice_type(self, config) return invoice def resources(self, start, end): if not self._resources: date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) }, { "field": "project_id", "op": "eq", "value": self.tenant["id"] }, ] # Sets up our resources as Ceilometer objects. # That's cool, I think. self._resources = self.conn.ceilometer.resources.list(date_fields) return self._resources # def usage(self, start, end, section=None): def usage(self, start, end): """ Usage is the meat of Artifice, returning a dict of location to sub-information """ # Returns a usage dict, based on regions. vms = {} vm_to_region = {} ports = {} usage_by_dc = {} writing_to = None vms = [] networks = [] storage = [] volumes = [] # Object storage is mapped by project_id for resource in self.resources(start, end): # print dir(resource) rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ] if "storage.objects" in rels: # Unknown how this data layout happens yet. storage.append(Resource(resource, self.conn)) pass elif "network" in rels: # Have we seen the VM that owns this yet? networks.append(Resource(resource , self.conn)) elif "volumne" in rels: volumes.append( Resource(resource, self.conn) ) elif 'instance' in rels: vms.append(Resource(resource, self.conn )) datacenters = {} region_tmpl = { "vms": vms, "network": networks, "objects": storage, "volumes": volumes } return Usage(region_tmpl, start, end, self.conn) class Usage(object): """ This is a dict-like object containing all the datacenters and meters available in those datacenters. """ def __init__(self, contents, start, end, conn): self.contents = contents self.start = start self.end = end self.conn = conn self._vms = [] self._objects = [] self._volumes = [] # Replaces all the internal references with better references to # actual metered values. 
# self._replace() @property def vms(self): if not self._vms: vms = [] for vm in self.contents["vms"]: VM = resources.VM(vm, self.start, self.end) md = vm["metadata"] host = md["host"] VM.location = self.conn.host_to_dc( vm["metadata"]["host"] ) vms.append(VM) self._vms = vms return self._vms @property def objects(self): if not self._objects: objs = [] for object_ in self.contents["objects"]: obj = resources.Object(object_, self.start, self.end) objs.append(obj) self._objs = objs return self._objs @property def volumes(self): if not self._volumes: objs = [] for obj in self.contents["volumes"]: obj = resources.Volume(obj, self.start, self.end) objs.append(obj) self._volumes = objs return self._volumes # def __getitem__(self, item): # return self.contents[item] def __iter__(self): return self def next(self): # pass keys = self.contents.keys() for key in keys: yield key raise StopIteration() def save(self): """ Iterate the list of things; save them to DB. """ for vm in self.vms: vm.save() for obj in self.objects: obj.save() for vol in self.volumes: vol.save() class Resource(object): def __init__(self, resource, conn): self.resource = resource self.conn = conn self._meters = {} # def __getitem__(self, item): # return self.resource def meter(self, name, start, end): pass # Return a named meter for meter in self.resource.links: if meter["rel"] == name: m = Meter(self, meter["href"], self.conn, start, end) self._meters[name] = m return m raise AttributeError("no such meter %s" % name) def __getitem__(self, name): # print name # print self.resource # print self.resource[name] return getattr(self.resource, name) # return self.resource.name @property def meters(self): if not self._meters: meters = [] for link in self.resource["links"]: if link["rel"] == "self": continue meter = Meter(self, link, self.conn) meters.append(meter) self._meters = meters return self._meters class Meter(object): def __init__(self, resource, link, conn, start=None, end=None): self.resource = resource self.link = link self.conn = conn self.start = start self.end = end # self.meter = meter def __getitem__(self, x): if isinstance(x, slice): # Woo pass pass def volume(self): return self.usage(self.start, self.end) def usage(self, start, end): """ Usage condenses the entirety of a meter into a single datapoint: A volume value that we can plot as a single number against previous usage for a given range. """ measurements = get_meter(self, start, end, self.conn.auth.auth_token) # return measurements # print measurements self.measurements = defaultdict(list) self.type = set([a["counter_type"] for a in measurements]) if len(self.type) > 1: # That's a big problem raise RuntimeError("Too many types for measurement!") elif len(self.type) == 0: raise RuntimeError("No types!") else: self.type = self.type.pop() type_ = None if self.type == "cumulative": # The usage is the last one, which is the highest value. # # Base it just on the resource ID. # Is this a reasonable thing to do? # Composition style: resource.meter("cpu_util").usage(start, end) == artifact type_ = Cumulative elif self.type == "gauge": type_ = Gauge # return Gauge(self.Resource, ) elif self.type == "delta": type_ = Delta return type_(self.resource, measurements, start, end) def save(self): if not self.start and self.end: raise AttributeError("Needs start and end defined to save") self.volume().save() class Artifact(object): """ Provides base artifact controls; generic typing information for the artifact structures. 
""" def __init__(self, resource, usage, start, end): self.resource = resource self.usage = usage self.start = start self.end = end def __getitem__(self, item): if item in self._data: return self._data[item] raise KeyError("no such item %s" % item) def save(self): """ Persists to our database backend. Opinionatedly this is a sql datastore. """ value = self.volume() session = self.resource.conn.session # self.artifice. try: tenant_id = self.resource["tenant_id"] except KeyError: tenant_id = self.resource["project_id"] resource_id = self.resource["resource_id"] tenant = session.query(tenants.Tenant).get(tenant_id) if tenant is None: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) else: try: res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0] tenant = res.tenant except IndexError: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) this_usage = usage.Usage( res, tenant, value, self.start, self.end, ) session.add(this_usage) session.commit() # Persist to Postgres def volume(self): """ Default billable number for this volume """ return sum([x["counter_volume"] for x in self.usage]) class Cumulative(Artifact): def volume(self): measurements = self.usage measurements = sorted( measurements, key= lambda x: x["timestamp"] ) total_usage = measurements[-1]["counter_volume"] - measurements[0]["counter_volume"] return total_usage # Gauge and Delta have very little to do: They are expected only to # exist as "not a cumulative" sort of artifact. class Gauge(Artifact): def volume(self): """ Default billable number for this volume """ # print "Usage is %s" % self.usage usage = sorted(self.usage, key=lambda x: x["timestamp"]) blocks = [] curr = [usage[0]] last = usage[0] try: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], date_format) except ValueError: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], other_date_format) except TypeError: pass for val in usage[1:]: try: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], date_format) except ValueError: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], other_date_format) except TypeError: pass if (val['timestamp'] - last["timestamp"]) > datetime.timedelta(hours=1): blocks.append(curr) curr = [val] last = val else: curr.append(val) # We are now sorted into 1-hour blocks totals = [] for block in blocks: usage = max( [v["counter_volume"] for v in block]) totals.append( usage ) # totals = [max(x, key=lambda val: val["counter_volume"] ) for x in blocks] # totals is now an array of max values per hour for a given month. # print totals return sum(totals) class Delta(Artifact): pass
self._tenancy = {} for tenant in self.auth.tenants.list(): t = Tenant(tenant, self) self._tenancy[t["name"]] = t
conditional_block
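In the `interface.py` record above, `get_meter` assembles Ceilometer's `q.field`/`q.op`/`q.value` filter triples and URL-encodes them into `url`, but then requests the bare `meter.link`, so the timestamp filter is silently dropped. A short sketch of the apparently intended behaviour, in Python 3 with `requests` (hedged: the endpoint shape simply mirrors the original):

```python
# get_meter with the assembled query string actually used. The original
# builds `url` and then requests `meter.link`, discarding the filter.
from urllib.parse import urlencode
import requests

DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"

def get_meter(meter_link, start, end, auth_token):
    filters = [
        ("q.field", "timestamp"), ("q.op", "ge"), ("q.value", start.strftime(DATE_FORMAT)),
        ("q.field", "timestamp"), ("q.op", "lt"), ("q.value", end.strftime(DATE_FORMAT)),
    ]
    # The original joins with "&", assuming the href already carries a query;
    # fall back to "?" when it does not.
    sep = "&" if "?" in meter_link else "?"
    url = meter_link + sep + urlencode(filters)
    r = requests.get(url, headers={
        "X-Auth-Token": auth_token,
        "Content-Type": "application/json",
    })
    r.raise_for_status()
    return r.json()
```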
interface.py
# Interfaces to the Ceilometer API # import ceilometer # Brings in HTTP support import requests import json import urllib from copy import copy from collections import defaultdict # import datetime # Provides authentication against Openstack from keystoneclient.v2_0 import client as KeystoneClient # Provides hooks to ceilometer, which we need for data. from ceilometerclient.v2.client import Client as ceilometer # from .models import usage from .models import Session, usage from sqlalchemy import create_engine # from .models.usage import Usage from .models import resources, tenants, usage # from .models.tenants import Tenant # Date format Ceilometer uses # 2013-07-03T13:34:17 # which is, as an strftime: # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S.%f") # or # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S") # Most of the time we use date_format date_format = "%Y-%m-%dT%H:%M:%S" # Sometimes things also have milliseconds, so we look for that too. # Because why not be annoying in all the ways? other_date_format = "%Y-%m-%dT%H:%M:%S.%f" def get_meter(meter, start, end, auth): # Meter is a href; in this case, it has a set of fields with it already. # print meter.link # print dir(meter) date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) } ] fields = [] for field in date_fields: fields.append( ("q.field", field["field"]) ) fields.append( ("q.op", field["op"]) ) fields.append( ("q.value", field["value"])) # Combine. url = "&".join((meter.link, urllib.urlencode(fields) )) r = requests.get( meter.link, headers={ "X-Auth-Token": auth, "Content-Type":"application/json"} ) return json.loads(r.text) class NotFound(BaseException): pass class keystone(KeystoneClient.Client): def tenant_by_name(self, name): authenticator = self.auth_url url = "%(url)s/tenants?%(query)s" % { "url": authenticator, "query": urllib.urlencode({"name":name}) } r = requests.get(url, headers={ "X-Auth-Token": self.auth_token, "Content-Type": "application/json" }) if r.ok: data = json.loads(r.text) assert data return data else: if r.status_code == 404: # couldn't find it raise NotFound class Artifice(object): """Produces billable artifacts""" def __init__(self, config): super(Artifice, self).__init__() self.config = config # This is the Keystone client connection, which provides our # OpenStack authentication self.auth = keystone( username= config["openstack"]["username"], password= config["openstack"]["password"], tenant_name= config["openstack"]["default_tenant"], auth_url= config["openstack"]["authentication_url"] ) conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % { "username": config["database"]["username"], "password": config["database"]["password"], "host": config["database"]["host"], "port": config["database"]["port"], "database": config["database"]["database"] } engine = create_engine(conn_string) Session.configure(bind=engine) self.session = Session() self.artifice = None self.ceilometer = ceilometer( self.config["ceilometer"]["host"], # Uses a lambda as ceilometer apparently wants to use it as a callable? token=lambda: self.auth.auth_token ) self._tenancy = None def host_to_dc(self, host): """ :param host: The name to use. :type host: str. :returns: str -- The datacenter corresponding to this host. """ # :raises: AttributeError, KeyError # How does this get implemented ? Should there be a module injection? 
return host # For the moment, passthrough # TODO: FIXME. def tenant(self, name): """ Returns a Tenant object describing the specified Tenant by name, or raises a NotFound error. """ # Returns a Tenant object for the given name. # Uses Keystone API to perform a direct name lookup, # as this is expected to work via name. data = self.auth.tenant_by_name(name) t = Tenant(data["tenant"], self) return t @property def tenants(self): """All the tenants in our system""" # print "tenant list is %s" % self.auth.tenants.list() if not self._tenancy: self._tenancy = {} for tenant in self.auth.tenants.list(): t = Tenant(tenant, self) self._tenancy[t["name"]] = t return self._tenancy class Tenant(object): def __init__(self, tenant, conn): self.tenant = tenant # Conn is the niceometer object we were instanced from self.conn = conn self._meters = set() self._resources = None self.invoice_type = None # Invoice type needs to get set from the config, which is # part of the Artifice setup above. def __getitem__(self, item): try: return getattr(self.tenant, item) except AttributeError: try: return self.tenant[item] except KeyError: raise KeyError("No such key '%s' in tenant" % item) def __getattr__(self, attr): if attr not in self.tenant: return object.__getattribute__(self, attr) # return super(Tenant, self).__getattr__(attr) return self.tenant[attr] def invoice(self, start, end): """ Creates a new Invoice. Invoices are an Artifice datamodel that represent a set of billable entries assigned to a client on a given Date. An Invoice offers very little of its own opinions, requiring a backend plugin to operate. @returns: invoice """ if self.invoice_type is None: invoice_type = self.conn.config["main"]["invoice:object"] if ":" not in invoice_type: raise AttributeError("Invoice configuration incorrect! %s" % invoice_type) module, call = invoice_type.split(":") _package = __import__(module, globals(), locals(), [ call ]) funct = getattr(_package, call) self.invoice_type = funct config = self.conn.config["invoice_object"] invoice = self.invoice_type(self, config) return invoice def resources(self, start, end): if not self._resources: date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) }, { "field": "project_id", "op": "eq", "value": self.tenant["id"] }, ] # Sets up our resources as Ceilometer objects. # That's cool, I think. self._resources = self.conn.ceilometer.resources.list(date_fields) return self._resources # def usage(self, start, end, section=None): def usage(self, start, end): """ Usage is the meat of Artifice, returning a dict of location to sub-information """ # Returns a usage dict, based on regions. vms = {} vm_to_region = {} ports = {} usage_by_dc = {} writing_to = None vms = [] networks = [] storage = [] volumes = [] # Object storage is mapped by project_id for resource in self.resources(start, end): # print dir(resource) rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ] if "storage.objects" in rels: # Unknown how this data layout happens yet. storage.append(Resource(resource, self.conn)) pass elif "network" in rels: # Have we seen the VM that owns this yet? 
networks.append(Resource(resource , self.conn)) elif "volumne" in rels: volumes.append( Resource(resource, self.conn) ) elif 'instance' in rels: vms.append(Resource(resource, self.conn )) datacenters = {} region_tmpl = { "vms": vms, "network": networks, "objects": storage, "volumes": volumes } return Usage(region_tmpl, start, end, self.conn) class Usage(object): """ This is a dict-like object containing all the datacenters and meters available in those datacenters. """ def __init__(self, contents, start, end, conn): self.contents = contents self.start = start self.end = end self.conn = conn self._vms = [] self._objects = [] self._volumes = [] # Replaces all the internal references with better references to # actual metered values. # self._replace() @property def vms(self): if not self._vms: vms = [] for vm in self.contents["vms"]: VM = resources.VM(vm, self.start, self.end) md = vm["metadata"] host = md["host"] VM.location = self.conn.host_to_dc( vm["metadata"]["host"] ) vms.append(VM) self._vms = vms return self._vms @property def objects(self): if not self._objects: objs = [] for object_ in self.contents["objects"]: obj = resources.Object(object_, self.start, self.end) objs.append(obj) self._objs = objs return self._objs @property def volumes(self): if not self._volumes: objs = [] for obj in self.contents["volumes"]: obj = resources.Volume(obj, self.start, self.end) objs.append(obj) self._volumes = objs return self._volumes # def __getitem__(self, item): # return self.contents[item] def __iter__(self): return self def next(self): # pass keys = self.contents.keys() for key in keys: yield key raise StopIteration() def save(self): """ Iterate the list of things; save them to DB. """ for vm in self.vms: vm.save() for obj in self.objects: obj.save() for vol in self.volumes: vol.save() class Resource(object): def __init__(self, resource, conn): self.resource = resource self.conn = conn self._meters = {} # def __getitem__(self, item): # return self.resource def meter(self, name, start, end): pass # Return a named meter for meter in self.resource.links: if meter["rel"] == name: m = Meter(self, meter["href"], self.conn, start, end) self._meters[name] = m return m raise AttributeError("no such meter %s" % name) def __getitem__(self, name): # print name # print self.resource # print self.resource[name] return getattr(self.resource, name) # return self.resource.name @property def meters(self): if not self._meters: meters = [] for link in self.resource["links"]: if link["rel"] == "self": continue meter = Meter(self, link, self.conn) meters.append(meter) self._meters = meters return self._meters class Meter(object): def __init__(self, resource, link, conn, start=None, end=None): self.resource = resource self.link = link self.conn = conn self.start = start self.end = end # self.meter = meter def __getitem__(self, x): if isinstance(x, slice): # Woo pass pass def volume(self): return self.usage(self.start, self.end) def usage(self, start, end): """ Usage condenses the entirety of a meter into a single datapoint: A volume value that we can plot as a single number against previous usage for a given range. 
""" measurements = get_meter(self, start, end, self.conn.auth.auth_token) # return measurements # print measurements self.measurements = defaultdict(list) self.type = set([a["counter_type"] for a in measurements]) if len(self.type) > 1: # That's a big problem raise RuntimeError("Too many types for measurement!") elif len(self.type) == 0: raise RuntimeError("No types!") else: self.type = self.type.pop() type_ = None if self.type == "cumulative": # The usage is the last one, which is the highest value. # # Base it just on the resource ID. # Is this a reasonable thing to do? # Composition style: resource.meter("cpu_util").usage(start, end) == artifact type_ = Cumulative elif self.type == "gauge": type_ = Gauge # return Gauge(self.Resource, ) elif self.type == "delta": type_ = Delta return type_(self.resource, measurements, start, end) def save(self): if not self.start and self.end: raise AttributeError("Needs start and end defined to save") self.volume().save() class Artifact(object): """ Provides base artifact controls; generic typing information for the artifact structures. """ def __init__(self, resource, usage, start, end): self.resource = resource self.usage = usage self.start = start self.end = end def __getitem__(self, item): if item in self._data: return self._data[item] raise KeyError("no such item %s" % item) def save(self): """ Persists to our database backend. Opinionatedly this is a sql datastore. """ value = self.volume() session = self.resource.conn.session # self.artifice. try: tenant_id = self.resource["tenant_id"] except KeyError: tenant_id = self.resource["project_id"] resource_id = self.resource["resource_id"] tenant = session.query(tenants.Tenant).get(tenant_id) if tenant is None: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) else: try: res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0] tenant = res.tenant except IndexError: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) this_usage = usage.Usage( res, tenant, value, self.start, self.end, ) session.add(this_usage) session.commit() # Persist to Postgres def volume(self): """ Default billable number for this volume """ return sum([x["counter_volume"] for x in self.usage]) class
(Artifact): def volume(self): measurements = self.usage measurements = sorted( measurements, key= lambda x: x["timestamp"] ) total_usage = measurements[-1]["counter_volume"] - measurements[0]["counter_volume"] return total_usage # Gauge and Delta have very little to do: They are expected only to # exist as "not a cumulative" sort of artifact. class Gauge(Artifact): def volume(self): """ Default billable number for this volume """ # print "Usage is %s" % self.usage usage = sorted(self.usage, key=lambda x: x["timestamp"]) blocks = [] curr = [usage[0]] last = usage[0] try: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], date_format) except ValueError: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], other_date_format) except TypeError: pass for val in usage[1:]: try: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], date_format) except ValueError: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], other_date_format) except TypeError: pass if (val['timestamp'] - last["timestamp"]) > datetime.timedelta(hours=1): blocks.append(curr) curr = [val] last = val else: curr.append(val) # We are now sorted into 1-hour blocks totals = [] for block in blocks: usage = max( [v["counter_volume"] for v in block]) totals.append( usage ) # totals = [max(x, key=lambda val: val["counter_volume"] ) for x in blocks] # totals is now an array of max values per hour for a given month. # print totals return sum(totals) class Delta(Artifact): pass
Cumulative
identifier_name
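The masked identifier in this record is `Cumulative`, whose `volume` method bills a monotonically increasing counter as last reading minus first over the window. A tiny self-contained sketch of that computation:

```python
# Cumulative.volume() in miniature: sort by timestamp, take the span.
def cumulative_volume(measurements):
    ordered = sorted(measurements, key=lambda m: m["timestamp"])
    return ordered[-1]["counter_volume"] - ordered[0]["counter_volume"]

samples = [
    {"timestamp": "2013-07-03T13:00:00", "counter_volume": 120},
    {"timestamp": "2013-07-03T14:00:00", "counter_volume": 150},
    {"timestamp": "2013-07-03T12:00:00", "counter_volume": 100},
]
assert cumulative_volume(samples) == 50  # 150 - 100, not 150 - 120
```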
interface.py
# Interfaces to the Ceilometer API # import ceilometer # Brings in HTTP support import requests import json import urllib from copy import copy from collections import defaultdict # import datetime # Provides authentication against Openstack from keystoneclient.v2_0 import client as KeystoneClient # Provides hooks to ceilometer, which we need for data. from ceilometerclient.v2.client import Client as ceilometer # from .models import usage from .models import Session, usage from sqlalchemy import create_engine # from .models.usage import Usage from .models import resources, tenants, usage # from .models.tenants import Tenant # Date format Ceilometer uses # 2013-07-03T13:34:17 # which is, as an strftime: # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S.%f") # or # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S") # Most of the time we use date_format date_format = "%Y-%m-%dT%H:%M:%S" # Sometimes things also have milliseconds, so we look for that too. # Because why not be annoying in all the ways? other_date_format = "%Y-%m-%dT%H:%M:%S.%f" def get_meter(meter, start, end, auth): # Meter is a href; in this case, it has a set of fields with it already. # print meter.link # print dir(meter) date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) } ] fields = [] for field in date_fields: fields.append( ("q.field", field["field"]) ) fields.append( ("q.op", field["op"]) ) fields.append( ("q.value", field["value"])) # Combine. url = "&".join((meter.link, urllib.urlencode(fields) )) r = requests.get( meter.link, headers={ "X-Auth-Token": auth, "Content-Type":"application/json"} ) return json.loads(r.text) class NotFound(BaseException): pass class keystone(KeystoneClient.Client): def tenant_by_name(self, name): authenticator = self.auth_url url = "%(url)s/tenants?%(query)s" % { "url": authenticator, "query": urllib.urlencode({"name":name}) } r = requests.get(url, headers={ "X-Auth-Token": self.auth_token, "Content-Type": "application/json" }) if r.ok: data = json.loads(r.text) assert data return data else: if r.status_code == 404: # couldn't find it raise NotFound class Artifice(object): """Produces billable artifacts""" def __init__(self, config): super(Artifice, self).__init__() self.config = config # This is the Keystone client connection, which provides our # OpenStack authentication self.auth = keystone( username= config["openstack"]["username"], password= config["openstack"]["password"], tenant_name= config["openstack"]["default_tenant"], auth_url= config["openstack"]["authentication_url"] ) conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % { "username": config["database"]["username"], "password": config["database"]["password"], "host": config["database"]["host"], "port": config["database"]["port"], "database": config["database"]["database"] } engine = create_engine(conn_string) Session.configure(bind=engine) self.session = Session() self.artifice = None self.ceilometer = ceilometer( self.config["ceilometer"]["host"], # Uses a lambda as ceilometer apparently wants to use it as a callable? token=lambda: self.auth.auth_token ) self._tenancy = None def host_to_dc(self, host): """ :param host: The name to use. :type host: str. :returns: str -- The datacenter corresponding to this host. """ # :raises: AttributeError, KeyError # How does this get implemented ? Should there be a module injection? 
return host # For the moment, passthrough # TODO: FIXME. def tenant(self, name): """ Returns a Tenant object describing the specified Tenant by name, or raises a NotFound error. """ # Returns a Tenant object for the given name. # Uses Keystone API to perform a direct name lookup, # as this is expected to work via name. data = self.auth.tenant_by_name(name) t = Tenant(data["tenant"], self) return t @property def tenants(self): """All the tenants in our system""" # print "tenant list is %s" % self.auth.tenants.list() if not self._tenancy: self._tenancy = {} for tenant in self.auth.tenants.list(): t = Tenant(tenant, self) self._tenancy[t["name"]] = t return self._tenancy class Tenant(object):
class Usage(object): """ This is a dict-like object containing all the datacenters and meters available in those datacenters. """ def __init__(self, contents, start, end, conn): self.contents = contents self.start = start self.end = end self.conn = conn self._vms = [] self._objects = [] self._volumes = [] # Replaces all the internal references with better references to # actual metered values. # self._replace() @property def vms(self): if not self._vms: vms = [] for vm in self.contents["vms"]: VM = resources.VM(vm, self.start, self.end) md = vm["metadata"] host = md["host"] VM.location = self.conn.host_to_dc( vm["metadata"]["host"] ) vms.append(VM) self._vms = vms return self._vms @property def objects(self): if not self._objects: objs = [] for object_ in self.contents["objects"]: obj = resources.Object(object_, self.start, self.end) objs.append(obj) self._objs = objs return self._objs @property def volumes(self): if not self._volumes: objs = [] for obj in self.contents["volumes"]: obj = resources.Volume(obj, self.start, self.end) objs.append(obj) self._volumes = objs return self._volumes # def __getitem__(self, item): # return self.contents[item] def __iter__(self): return self def next(self): # pass keys = self.contents.keys() for key in keys: yield key raise StopIteration() def save(self): """ Iterate the list of things; save them to DB. """ for vm in self.vms: vm.save() for obj in self.objects: obj.save() for vol in self.volumes: vol.save() class Resource(object): def __init__(self, resource, conn): self.resource = resource self.conn = conn self._meters = {} # def __getitem__(self, item): # return self.resource def meter(self, name, start, end): pass # Return a named meter for meter in self.resource.links: if meter["rel"] == name: m = Meter(self, meter["href"], self.conn, start, end) self._meters[name] = m return m raise AttributeError("no such meter %s" % name) def __getitem__(self, name): # print name # print self.resource # print self.resource[name] return getattr(self.resource, name) # return self.resource.name @property def meters(self): if not self._meters: meters = [] for link in self.resource["links"]: if link["rel"] == "self": continue meter = Meter(self, link, self.conn) meters.append(meter) self._meters = meters return self._meters class Meter(object): def __init__(self, resource, link, conn, start=None, end=None): self.resource = resource self.link = link self.conn = conn self.start = start self.end = end # self.meter = meter def __getitem__(self, x): if isinstance(x, slice): # Woo pass pass def volume(self): return self.usage(self.start, self.end) def usage(self, start, end): """ Usage condenses the entirety of a meter into a single datapoint: A volume value that we can plot as a single number against previous usage for a given range. """ measurements = get_meter(self, start, end, self.conn.auth.auth_token) # return measurements # print measurements self.measurements = defaultdict(list) self.type = set([a["counter_type"] for a in measurements]) if len(self.type) > 1: # That's a big problem raise RuntimeError("Too many types for measurement!") elif len(self.type) == 0: raise RuntimeError("No types!") else: self.type = self.type.pop() type_ = None if self.type == "cumulative": # The usage is the last one, which is the highest value. # # Base it just on the resource ID. # Is this a reasonable thing to do? 
# Composition style: resource.meter("cpu_util").usage(start, end) == artifact type_ = Cumulative elif self.type == "gauge": type_ = Gauge # return Gauge(self.Resource, ) elif self.type == "delta": type_ = Delta return type_(self.resource, measurements, start, end) def save(self): if not self.start and self.end: raise AttributeError("Needs start and end defined to save") self.volume().save() class Artifact(object): """ Provides base artifact controls; generic typing information for the artifact structures. """ def __init__(self, resource, usage, start, end): self.resource = resource self.usage = usage self.start = start self.end = end def __getitem__(self, item): if item in self._data: return self._data[item] raise KeyError("no such item %s" % item) def save(self): """ Persists to our database backend. Opinionatedly this is a sql datastore. """ value = self.volume() session = self.resource.conn.session # self.artifice. try: tenant_id = self.resource["tenant_id"] except KeyError: tenant_id = self.resource["project_id"] resource_id = self.resource["resource_id"] tenant = session.query(tenants.Tenant).get(tenant_id) if tenant is None: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) else: try: res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0] tenant = res.tenant except IndexError: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) this_usage = usage.Usage( res, tenant, value, self.start, self.end, ) session.add(this_usage) session.commit() # Persist to Postgres def volume(self): """ Default billable number for this volume """ return sum([x["counter_volume"] for x in self.usage]) class Cumulative(Artifact): def volume(self): measurements = self.usage measurements = sorted( measurements, key= lambda x: x["timestamp"] ) total_usage = measurements[-1]["counter_volume"] - measurements[0]["counter_volume"] return total_usage # Gauge and Delta have very little to do: They are expected only to # exist as "not a cumulative" sort of artifact. class Gauge(Artifact): def volume(self): """ Default billable number for this volume """ # print "Usage is %s" % self.usage usage = sorted(self.usage, key=lambda x: x["timestamp"]) blocks = [] curr = [usage[0]] last = usage[0] try: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], date_format) except ValueError: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], other_date_format) except TypeError: pass for val in usage[1:]: try: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], date_format) except ValueError: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], other_date_format) except TypeError: pass if (val['timestamp'] - last["timestamp"]) > datetime.timedelta(hours=1): blocks.append(curr) curr = [val] last = val else: curr.append(val) # We are now sorted into 1-hour blocks totals = [] for block in blocks: usage = max( [v["counter_volume"] for v in block]) totals.append( usage ) # totals = [max(x, key=lambda val: val["counter_volume"] ) for x in blocks] # totals is now an array of max values per hour for a given month. # print totals return sum(totals) class Delta(Artifact): pass
def __init__(self, tenant, conn): self.tenant = tenant # Conn is the niceometer object we were instanced from self.conn = conn self._meters = set() self._resources = None self.invoice_type = None # Invoice type needs to get set from the config, which is # part of the Artifice setup above. def __getitem__(self, item): try: return getattr(self.tenant, item) except AttributeError: try: return self.tenant[item] except KeyError: raise KeyError("No such key '%s' in tenant" % item) def __getattr__(self, attr): if attr not in self.tenant: return object.__getattribute__(self, attr) # return super(Tenant, self).__getattr__(attr) return self.tenant[attr] def invoice(self, start, end): """ Creates a new Invoice. Invoices are an Artifice datamodel that represent a set of billable entries assigned to a client on a given Date. An Invoice offers very little of its own opinions, requiring a backend plugin to operate. @returns: invoice """ if self.invoice_type is None: invoice_type = self.conn.config["main"]["invoice:object"] if ":" not in invoice_type: raise AttributeError("Invoice configuration incorrect! %s" % invoice_type) module, call = invoice_type.split(":") _package = __import__(module, globals(), locals(), [ call ]) funct = getattr(_package, call) self.invoice_type = funct config = self.conn.config["invoice_object"] invoice = self.invoice_type(self, config) return invoice def resources(self, start, end): if not self._resources: date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) }, { "field": "project_id", "op": "eq", "value": self.tenant["id"] }, ] # Sets up our resources as Ceilometer objects. # That's cool, I think. self._resources = self.conn.ceilometer.resources.list(date_fields) return self._resources # def usage(self, start, end, section=None): def usage(self, start, end): """ Usage is the meat of Artifice, returning a dict of location to sub-information """ # Returns a usage dict, based on regions. vms = {} vm_to_region = {} ports = {} usage_by_dc = {} writing_to = None vms = [] networks = [] storage = [] volumes = [] # Object storage is mapped by project_id for resource in self.resources(start, end): # print dir(resource) rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ] if "storage.objects" in rels: # Unknown how this data layout happens yet. storage.append(Resource(resource, self.conn)) pass elif "network" in rels: # Have we seen the VM that owns this yet? networks.append(Resource(resource , self.conn)) elif "volumne" in rels: volumes.append( Resource(resource, self.conn) ) elif 'instance' in rels: vms.append(Resource(resource, self.conn )) datacenters = {} region_tmpl = { "vms": vms, "network": networks, "objects": storage, "volumes": volumes } return Usage(region_tmpl, start, end, self.conn)
identifier_body
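`Tenant.invoice` in this record loads its backend from a config string of the form `"module:callable"`, splitting on the colon and resolving the attribute dynamically. A minimal sketch of that plugin convention, using `importlib.import_module` in place of the original's raw `__import__` call (equivalent for this purpose):

```python
# The "module:callable" plugin convention from Tenant.invoice.
import importlib

def load_plugin(spec):
    if ":" not in spec:
        raise AttributeError("Invoice configuration incorrect! %s" % spec)
    module_name, attr = spec.split(":", 1)
    module = importlib.import_module(module_name)
    return getattr(module, attr)

# A config value like "billing.invoices:CSVInvoice" would return the
# CSVInvoice class from a (hypothetical) billing.invoices module.
JSONEncoder = load_plugin("json:JSONEncoder")  # stdlib example that runs
```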
interface.py
# Interfaces to the Ceilometer API # import ceilometer # Brings in HTTP support import requests import json import urllib from copy import copy from collections import defaultdict # import datetime # Provides authentication against Openstack from keystoneclient.v2_0 import client as KeystoneClient # Provides hooks to ceilometer, which we need for data. from ceilometerclient.v2.client import Client as ceilometer # from .models import usage from .models import Session, usage from sqlalchemy import create_engine # from .models.usage import Usage from .models import resources, tenants, usage # from .models.tenants import Tenant # Date format Ceilometer uses # 2013-07-03T13:34:17 # which is, as an strftime: # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S.%f") # or # timestamp = datetime.strptime(res["timestamp"], "%Y-%m-%dT%H:%M:%S") # Most of the time we use date_format date_format = "%Y-%m-%dT%H:%M:%S" # Sometimes things also have milliseconds, so we look for that too. # Because why not be annoying in all the ways? other_date_format = "%Y-%m-%dT%H:%M:%S.%f" def get_meter(meter, start, end, auth): # Meter is a href; in this case, it has a set of fields with it already. # print meter.link # print dir(meter) date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) } ] fields = [] for field in date_fields: fields.append( ("q.field", field["field"]) ) fields.append( ("q.op", field["op"]) ) fields.append( ("q.value", field["value"])) # Combine. url = "&".join((meter.link, urllib.urlencode(fields) )) r = requests.get( meter.link, headers={ "X-Auth-Token": auth, "Content-Type":"application/json"} ) return json.loads(r.text) class NotFound(BaseException): pass class keystone(KeystoneClient.Client): def tenant_by_name(self, name): authenticator = self.auth_url url = "%(url)s/tenants?%(query)s" % { "url": authenticator, "query": urllib.urlencode({"name":name}) } r = requests.get(url, headers={ "X-Auth-Token": self.auth_token, "Content-Type": "application/json"
data = json.loads(r.text) assert data return data else: if r.status_code == 404: # couldn't find it raise NotFound class Artifice(object): """Produces billable artifacts""" def __init__(self, config): super(Artifice, self).__init__() self.config = config # This is the Keystone client connection, which provides our # OpenStack authentication self.auth = keystone( username= config["openstack"]["username"], password= config["openstack"]["password"], tenant_name= config["openstack"]["default_tenant"], auth_url= config["openstack"]["authentication_url"] ) conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % { "username": config["database"]["username"], "password": config["database"]["password"], "host": config["database"]["host"], "port": config["database"]["port"], "database": config["database"]["database"] } engine = create_engine(conn_string) Session.configure(bind=engine) self.session = Session() self.artifice = None self.ceilometer = ceilometer( self.config["ceilometer"]["host"], # Uses a lambda as ceilometer apparently wants to use it as a callable? token=lambda: self.auth.auth_token ) self._tenancy = None def host_to_dc(self, host): """ :param host: The name to use. :type host: str. :returns: str -- The datacenter corresponding to this host. """ # :raises: AttributeError, KeyError # How does this get implemented ? Should there be a module injection? return host # For the moment, passthrough # TODO: FIXME. def tenant(self, name): """ Returns a Tenant object describing the specified Tenant by name, or raises a NotFound error. """ # Returns a Tenant object for the given name. # Uses Keystone API to perform a direct name lookup, # as this is expected to work via name. data = self.auth.tenant_by_name(name) t = Tenant(data["tenant"], self) return t @property def tenants(self): """All the tenants in our system""" # print "tenant list is %s" % self.auth.tenants.list() if not self._tenancy: self._tenancy = {} for tenant in self.auth.tenants.list(): t = Tenant(tenant, self) self._tenancy[t["name"]] = t return self._tenancy class Tenant(object): def __init__(self, tenant, conn): self.tenant = tenant # Conn is the niceometer object we were instanced from self.conn = conn self._meters = set() self._resources = None self.invoice_type = None # Invoice type needs to get set from the config, which is # part of the Artifice setup above. def __getitem__(self, item): try: return getattr(self.tenant, item) except AttributeError: try: return self.tenant[item] except KeyError: raise KeyError("No such key '%s' in tenant" % item) def __getattr__(self, attr): if attr not in self.tenant: return object.__getattribute__(self, attr) # return super(Tenant, self).__getattr__(attr) return self.tenant[attr] def invoice(self, start, end): """ Creates a new Invoice. Invoices are an Artifice datamodel that represent a set of billable entries assigned to a client on a given Date. An Invoice offers very little of its own opinions, requiring a backend plugin to operate. @returns: invoice """ if self.invoice_type is None: invoice_type = self.conn.config["main"]["invoice:object"] if ":" not in invoice_type: raise AttributeError("Invoice configuration incorrect! 
%s" % invoice_type) module, call = invoice_type.split(":") _package = __import__(module, globals(), locals(), [ call ]) funct = getattr(_package, call) self.invoice_type = funct config = self.conn.config["invoice_object"] invoice = self.invoice_type(self, config) return invoice def resources(self, start, end): if not self._resources: date_fields = [{ "field": "timestamp", "op": "ge", "value": start.strftime(date_format) }, { "field": "timestamp", "op": "lt", "value": end.strftime(date_format) }, { "field": "project_id", "op": "eq", "value": self.tenant["id"] }, ] # Sets up our resources as Ceilometer objects. # That's cool, I think. self._resources = self.conn.ceilometer.resources.list(date_fields) return self._resources # def usage(self, start, end, section=None): def usage(self, start, end): """ Usage is the meat of Artifice, returning a dict of location to sub-information """ # Returns a usage dict, based on regions. vms = {} vm_to_region = {} ports = {} usage_by_dc = {} writing_to = None vms = [] networks = [] storage = [] volumes = [] # Object storage is mapped by project_id for resource in self.resources(start, end): # print dir(resource) rels = [link["rel"] for link in resource.links if link["rel"] != 'self' ] if "storage.objects" in rels: # Unknown how this data layout happens yet. storage.append(Resource(resource, self.conn)) pass elif "network" in rels: # Have we seen the VM that owns this yet? networks.append(Resource(resource , self.conn)) elif "volumne" in rels: volumes.append( Resource(resource, self.conn) ) elif 'instance' in rels: vms.append(Resource(resource, self.conn )) datacenters = {} region_tmpl = { "vms": vms, "network": networks, "objects": storage, "volumes": volumes } return Usage(region_tmpl, start, end, self.conn) class Usage(object): """ This is a dict-like object containing all the datacenters and meters available in those datacenters. """ def __init__(self, contents, start, end, conn): self.contents = contents self.start = start self.end = end self.conn = conn self._vms = [] self._objects = [] self._volumes = [] # Replaces all the internal references with better references to # actual metered values. # self._replace() @property def vms(self): if not self._vms: vms = [] for vm in self.contents["vms"]: VM = resources.VM(vm, self.start, self.end) md = vm["metadata"] host = md["host"] VM.location = self.conn.host_to_dc( vm["metadata"]["host"] ) vms.append(VM) self._vms = vms return self._vms @property def objects(self): if not self._objects: objs = [] for object_ in self.contents["objects"]: obj = resources.Object(object_, self.start, self.end) objs.append(obj) self._objs = objs return self._objs @property def volumes(self): if not self._volumes: objs = [] for obj in self.contents["volumes"]: obj = resources.Volume(obj, self.start, self.end) objs.append(obj) self._volumes = objs return self._volumes # def __getitem__(self, item): # return self.contents[item] def __iter__(self): return self def next(self): # pass keys = self.contents.keys() for key in keys: yield key raise StopIteration() def save(self): """ Iterate the list of things; save them to DB. 
""" for vm in self.vms: vm.save() for obj in self.objects: obj.save() for vol in self.volumes: vol.save() class Resource(object): def __init__(self, resource, conn): self.resource = resource self.conn = conn self._meters = {} # def __getitem__(self, item): # return self.resource def meter(self, name, start, end): pass # Return a named meter for meter in self.resource.links: if meter["rel"] == name: m = Meter(self, meter["href"], self.conn, start, end) self._meters[name] = m return m raise AttributeError("no such meter %s" % name) def __getitem__(self, name): # print name # print self.resource # print self.resource[name] return getattr(self.resource, name) # return self.resource.name @property def meters(self): if not self._meters: meters = [] for link in self.resource["links"]: if link["rel"] == "self": continue meter = Meter(self, link, self.conn) meters.append(meter) self._meters = meters return self._meters class Meter(object): def __init__(self, resource, link, conn, start=None, end=None): self.resource = resource self.link = link self.conn = conn self.start = start self.end = end # self.meter = meter def __getitem__(self, x): if isinstance(x, slice): # Woo pass pass def volume(self): return self.usage(self.start, self.end) def usage(self, start, end): """ Usage condenses the entirety of a meter into a single datapoint: A volume value that we can plot as a single number against previous usage for a given range. """ measurements = get_meter(self, start, end, self.conn.auth.auth_token) # return measurements # print measurements self.measurements = defaultdict(list) self.type = set([a["counter_type"] for a in measurements]) if len(self.type) > 1: # That's a big problem raise RuntimeError("Too many types for measurement!") elif len(self.type) == 0: raise RuntimeError("No types!") else: self.type = self.type.pop() type_ = None if self.type == "cumulative": # The usage is the last one, which is the highest value. # # Base it just on the resource ID. # Is this a reasonable thing to do? # Composition style: resource.meter("cpu_util").usage(start, end) == artifact type_ = Cumulative elif self.type == "gauge": type_ = Gauge # return Gauge(self.Resource, ) elif self.type == "delta": type_ = Delta return type_(self.resource, measurements, start, end) def save(self): if not self.start and self.end: raise AttributeError("Needs start and end defined to save") self.volume().save() class Artifact(object): """ Provides base artifact controls; generic typing information for the artifact structures. """ def __init__(self, resource, usage, start, end): self.resource = resource self.usage = usage self.start = start self.end = end def __getitem__(self, item): if item in self._data: return self._data[item] raise KeyError("no such item %s" % item) def save(self): """ Persists to our database backend. Opinionatedly this is a sql datastore. """ value = self.volume() session = self.resource.conn.session # self.artifice. 
try: tenant_id = self.resource["tenant_id"] except KeyError: tenant_id = self.resource["project_id"] resource_id = self.resource["resource_id"] tenant = session.query(tenants.Tenant).get(tenant_id) if tenant is None: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) else: try: res = session.query(resources.Resource).filter(resources.Resource.id == resource_id)[0] tenant = res.tenant except IndexError: res = resources.Resource() tenant = tenants.Tenant() tenant.id = tenant_id res.id = resource_id res.tenant = tenant session.add(res) session.add(tenant) this_usage = usage.Usage( res, tenant, value, self.start, self.end, ) session.add(this_usage) session.commit() # Persist to Postgres def volume(self): """ Default billable number for this volume """ return sum([x["counter_volume"] for x in self.usage]) class Cumulative(Artifact): def volume(self): measurements = self.usage measurements = sorted( measurements, key= lambda x: x["timestamp"] ) total_usage = measurements[-1]["counter_volume"] - measurements[0]["counter_volume"] return total_usage # Gauge and Delta have very little to do: They are expected only to # exist as "not a cumulative" sort of artifact. class Gauge(Artifact): def volume(self): """ Default billable number for this volume """ # print "Usage is %s" % self.usage usage = sorted(self.usage, key=lambda x: x["timestamp"]) blocks = [] curr = [usage[0]] last = usage[0] try: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], date_format) except ValueError: last["timestamp"] = datetime.datetime.strptime(last["timestamp"], other_date_format) except TypeError: pass for val in usage[1:]: try: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], date_format) except ValueError: val["timestamp"] = datetime.datetime.strptime(val["timestamp"], other_date_format) except TypeError: pass if (val['timestamp'] - last["timestamp"]) > datetime.timedelta(hours=1): blocks.append(curr) curr = [val] last = val else: curr.append(val) # We are now sorted into 1-hour blocks totals = [] for block in blocks: usage = max( [v["counter_volume"] for v in block]) totals.append( usage ) # totals = [max(x, key=lambda val: val["counter_volume"] ) for x in blocks] # totals is now an array of max values per hour for a given month. # print totals return sum(totals) class Delta(Artifact): pass
}) if r.ok:
random_line_split
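`Gauge.volume` in these records sorts samples by timestamp, starts a new block whenever a sample falls more than an hour after the block's first sample, and sums the per-block maxima. Two things to watch: the file's `import datetime` is commented out although this method uses it, and the trailing block is never appended to `blocks`, so its usage is lost. A sketch with both points addressed, assuming string timestamps in one of the file's two formats:

```python
# Gauge.volume() in miniature: bucket into <=1h blocks from each block's
# first sample, then sum the per-block maxima.
import datetime

DATE_FORMATS = ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f")

def parse_ts(value):
    for fmt in DATE_FORMATS:  # Ceilometer sometimes adds milliseconds
        try:
            return datetime.datetime.strptime(value, fmt)
        except ValueError:
            continue
    raise ValueError("unrecognised timestamp %r" % value)

def gauge_volume(samples):
    samples = sorted(samples, key=lambda s: s["timestamp"])
    blocks, current = [], [samples[0]]
    block_start = parse_ts(samples[0]["timestamp"])
    for sample in samples[1:]:
        ts = parse_ts(sample["timestamp"])
        if ts - block_start > datetime.timedelta(hours=1):
            blocks.append(current)
            current, block_start = [sample], ts
        else:
            current.append(sample)
    blocks.append(current)  # the original drops this final block
    return sum(max(s["counter_volume"] for s in block) for block in blocks)
```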
fint.py
import re, os, csv, sys, time import json, random, argparse from collections import Counter from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.common.exceptions import TimeoutException, NoSuchElementException VERSION = "0.2" BANNER = """ {0} v. {1} - Fint: Find users who interacted with a profile by sowdust """.format(sys.argv[0], VERSION) BASE_URL = 'https://mbasic.facebook.com' def log(m): return def pause(min=2, max=5): return round(random.uniform(min, max), 1) def do_login(driver, usr, pwd): log('[*] Trying to log in with user %s' % usr) driver.get('https://www.facebook.com') try: elem = driver.find_element_by_xpath('//button[@data-cookiebanner="accept_button"]') elem.click() except Exception as ex: print('[!] Error while accepting cookies:') print(ex) try: elem = driver.find_element_by_id('email') elem.send_keys(usr) elem = driver.find_element_by_id('pass') elem.send_keys(pwd) except NoSuchElementException: elem = driver.find_element_by_name('email') elem.send_keys(usr) elem = driver.find_element_by_name('pass') elem.send_keys(pwd) except Exception as ex: print('[!] Error while logging in:') print(ex) sys.exit(0) elem.send_keys(Keys.RETURN) time.sleep(pause(2, 3)) def check_login(driver): time.sleep(pause(3, 4)) if 'href="/me/"' not in driver.page_source: print('[!] Not logged in. Did you use valid credentials?') print(driver.page_source) sys.exit(0) else: log('[*] Logged in') def get_stories_urls(html, target): # only return stories BY the user user_token = 'id=%s' % target links = re.findall('(/story.php\?story_fbid=[^"#]+)', html) return [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links) if user_token in x ] def get_photos_urls(target_id, html):
def get_all_photos(driver, target_id, limit=100): url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id driver.get(url) time.sleep(pause()) see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source) photos = [] if not see_all: return photos else: driver.get(BASE_URL + see_all[0].replace('&amp;', '&')) while len(photos) < limit: photos += get_photos_urls(target_id, driver.page_source) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Photos</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return photos def get_all_stories(driver, target, limit=100): url = 'https://mbasic.facebook.com/%s?v=timeline' % target driver.get(url) stories = [] while len(stories) < limit: stories += get_stories_urls(driver.page_source, target) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Stories</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return stories def get_all_comments(driver, url, limit=200, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") commenters = parse_commenters(html) cur_length += len(commenters) more_comments_url = re.findall( '<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html) more_comments_url = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in more_comments_url ] if (more_comments_url) and limit > cur_length: time.sleep(pause()) url = more_comments_url[0] commenters += get_all_comments(driver, url, limit, cur_length=cur_length) return commenters # given a driver on a story.php page, extracts all users who have reacted # takes only 1st level reactions (not considering reactions to comments etc.) def get_all_reactions(driver, url, reactions_per_page=999, limit=2000, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") reactions = parse_likers(html) cur_length += len(reactions) reaction_urls = re.findall( '(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)', html) reaction_urls = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&').replace( '?limit=10', '?limit=%d' % reactions_per_page)) for x in reaction_urls ] if (reaction_urls) and limit > cur_length: time.sleep(pause()) url = reaction_urls[0] reactions += get_all_reactions(driver, url, reactions_per_page, limit, cur_length) return reactions # Given a story.php page, return a list of (url, display name) def parse_commenters(html): return re.findall( '<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html) # Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name) def parse_likers(html): return re.findall( '<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html) def profile_picture(driver, target_username): url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username) driver.get(url) commenters = parse_commenters(driver.page_source) # given a list of [username, name] returns a list of [id, name, username] def fill_user_ids(driver, users): res = [] c = 0 msg = '[*] Retrieving user ids... 
' try: for u in users: c += 1 msg = '%s[*] Retrieving user ids... %d of %d' % ('\r' * len(msg), c, len(users)) print(msg, end='\r') time.sleep(pause()) fbid = get_user_id(driver, u[0]) user = (fbid, u[1], u[0]) res.append(user) except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. Exiting...') return res except Exception as ex: print('[!] Error while retrieving user ids') print(ex) return res return res # given a username, finds the fb user id from the source of the profile page def get_user_id(driver, username): url = 'https://www.facebook.com/%s' % username.replace('/', '') driver.get(url) fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source) if fbid: return fbid[0] else: print('[!] Error while getting id of user %s' % username) return -1 def get_username(driver, userid): url = 'https://www.facebook.com/%s' % userid driver.get(url) time.sleep(pause()) return driver.current_url.split('/')[-1].split('?')[0] def parse_args(): parser = argparse.ArgumentParser( description='Find users who interacted with a Facebook profile.') parser.add_argument( '-fu', '--user', metavar='USERNAME', type=str, help='Username of the Facebook account that will be used for scraping') parser.add_argument( '-fp', '--password', metavar='PASSWORD', type=str, help='Password of the Facebook account that will be used for scraping') parser.add_argument( '-t', '--target', metavar='TARGET', type=str, help='Username or numeric id of the target Facebook account') parser.add_argument('-ls', '--limit-stories', metavar='LIMIT', type=int, default=20, help='Max number of stories to analyze') parser.add_argument('-lp', '--limit-photos', metavar='LIMIT', type=int, default=20, help='Max number of photos to analyze') parser.add_argument( '-lr', '--limit-reactions', metavar='LIMIT', default=1000, type=int, help='Max number of reactions to analyze for each story') parser.add_argument( '-lc', '--limit-comments', metavar='LIMIT', default=100, type=int, help='Max number of comments to analyze for each story') parser.add_argument('-o', '--output', metavar='OUTPUTFILE', type=str, help='Specify the name of the pivots output file') parser.add_argument('-csv', '--csv-output', metavar='CSVOUTPUTFILE', type=str, help='Store output file also in CSV format') parser.add_argument( '-q', '--headless', action='store_true', help='Run browser in headless mode. 
No browser window will be shown.') parser.add_argument('-d', '--driver-path', metavar='EXECUTABLE', type=str, help='Path to geckodriver executable') args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help']) return args def print_statistics(commenters, reactions): print('-' * 78) print(' ' * 34, end=' ') print('STATISTICS') print('-' * 78) print('Most comments:') for u in Counter(commenters).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Most reactions:') for u in Counter(reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Total:') for u in Counter(commenters + reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() def store_csv(users, csv_file_path): print('[*] Storing users in csv file %s' % csv_file_path) with open(csv_file_path, mode='w', newline='', encoding='utf-8') as csv_file: writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['id', 'name', 'url']) for u in users: writer.writerow(u) def store_pivots(users, path): print('[*] Storing users id in file %s' % path) with open(path, 'w') as f: for u in users: f.write('%s\n' % u[0]) def check_file_exists(file): yes = {'yes', 'y', 'ye'} if os.path.isfile(file): print( '[!] Warning: output file %s already exists. Do you want to overwrite? [y/N]' % file, end=' ') choice = input().lower() if choice not in yes: sys.exit(0) def main(): print(BANNER) args = parse_args() options = Options() if args.headless: options.add_argument("--headless") driver = webdriver.Firefox(executable_path=args.driver_path, options=options) do_login(driver, args.user, args.password) check_login(driver) if args.target.isdigit(): target_id = args.target target_username = get_username(driver, target_id) else: target_id = get_user_id(driver, args.target) target_username = args.target print('[*] Selected target: %s (%s)' % (target_username, target_id)) urls_to_visit = [] commenters = [] reactions = [] users = [] print('[*] Getting photos links... ', end=" ") photos = get_all_photos(driver, target_id, args.limit_photos)[:args.limit_photos] print('%d photos found' % len(photos)) print('[*] Getting stories links... ', end=" ") stories = get_all_stories(driver, target_id, args.limit_stories)[:args.limit_stories] print('%d stories found' % len(stories)) print( '[*] Retrieving users who have interacted... press Ctrl+C when you have enough' ) msg = '' try: for url in photos + stories: commenters += parse_commenters(driver.page_source) if len(commenters) < args.limit_comments: commenters += get_all_comments(driver, url, limit=args.limit_comments) if len(reactions) < args.limit_reactions: reactions += get_all_reactions(driver, url, limit=args.limit_reactions) users = list(set(reactions).union(set(commenters))) msg = '%sUnique users: %d Comments: %d Reactions: %d' % ( '\r' * len(msg), len(users), len(commenters), len(reactions)) print(msg, end='\r') except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. 
%d users retrieved' % len(users)) reactions = reactions[:args.limit_reactions] commenters = commenters[:args.limit_comments] users = list(set(reactions).union(set(commenters))) print_statistics(commenters, reactions) users = fill_user_ids(driver, users) if args.output: store_pivots(users, args.output) else: store_pivots(users, '%s-pivots.txt' % target_id) if args.csv_output: store_csv(users, args.csv_output) print('[*] Found %d comments and %d reactions from %d unique users ' % (len(commenters), len(reactions), len(users))) driver.close() if __name__ == '__main__': main()
links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html) return ['%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links)]
identifier_body
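Each record in this dump stores one file as a (prefix, suffix, middle) split plus a fim_type label such as identifier_body above. A minimal sketch of how such a record round-trips to the original file, and of a prefix-suffix-middle layout often used for fill-in-the-middle training; the sentinel token names are placeholders, not this dataset's actual vocabulary.

def reconstruct(prefix: str, middle: str, suffix: str) -> str:
    # Concatenating the three parts reproduces the original source file.
    return prefix + middle + suffix

def to_psm_sample(prefix: str, suffix: str, middle: str,
                  pre: str = "<fim_prefix>", suf: str = "<fim_suffix>",
                  mid: str = "<fim_middle>") -> str:
    # The model is shown prefix and suffix, then learns to emit the middle.
    return f"{pre}{prefix}{suf}{suffix}{mid}{middle}"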
fint.py
import re, os, csv, sys, time import json, random, argparse from collections import Counter from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.common.exceptions import TimeoutException, NoSuchElementException VERSION = "0.2" BANNER = """ {0} v. {1} - Fint: Find users who interacted with a profile by sowdust """.format(sys.argv[0], VERSION) BASE_URL = 'https://mbasic.facebook.com' def log(m): return def pause(min=2, max=5): return round(random.uniform(min, max), 1) def do_login(driver, usr, pwd): log('[*] Trying to log in with user %s' % usr) driver.get('https://www.facebook.com') try: elem = driver.find_element_by_xpath('//button[@data-cookiebanner="accept_button"]') elem.click() except Exception as ex: print('[!] Error while accepting cookies:') print(ex) try: elem = driver.find_element_by_id('email') elem.send_keys(usr) elem = driver.find_element_by_id('pass') elem.send_keys(pwd) except NoSuchElementException: elem = driver.find_element_by_name('email') elem.send_keys(usr) elem = driver.find_element_by_name('pass') elem.send_keys(pwd) except Exception as ex: print('[!] Error while logging in:') print(ex) sys.exit(0) elem.send_keys(Keys.RETURN) time.sleep(pause(2, 3)) def check_login(driver): time.sleep(pause(3, 4)) if 'href="/me/"' not in driver.page_source: print('[!] Not logged in. Did you use valid credentials?') print(driver.page_source) sys.exit(0) else: log('[*] Logged in') def get_stories_urls(html, target): # only return stories BY the user user_token = 'id=%s' % target links = re.findall('(/story.php\?story_fbid=[^"#]+)', html) return [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links) if user_token in x ] def get_photos_urls(target_id, html): links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html) return ['%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links)] def get_all_photos(driver, target_id, limit=100): url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id driver.get(url) time.sleep(pause()) see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source) photos = [] if not see_all: return photos else: driver.get(BASE_URL + see_all[0].replace('&amp;', '&')) while len(photos) < limit: photos += get_photos_urls(target_id, driver.page_source) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Photos</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return photos def get_all_stories(driver, target, limit=100): url = 'https://mbasic.facebook.com/%s?v=timeline' % target driver.get(url) stories = [] while len(stories) < limit: stories += get_stories_urls(driver.page_source, target) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Stories</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more:
else: break return stories def get_all_comments(driver, url, limit=200, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") commenters = parse_commenters(html) cur_length += len(commenters) more_comments_url = re.findall( '<div class=".[^"]*" id="see_next_[0-9]+"><a href="(.[^"]*)">', html) more_comments_url = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in more_comments_url ] if (more_comments_url) and limit > cur_length: time.sleep(pause()) url = more_comments_url[0] commenters += get_all_comments(driver, url, limit, cur_length=cur_length) return commenters # given a driver on a story.php page, extracts all users who have reacted # takes only 1st level reactions (not considering reactions to comments etc.) def get_all_reactions(driver, url, reactions_per_page=999, limit=2000, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") reactions = parse_likers(html) cur_length += len(reactions) reaction_urls = re.findall( '(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)', html) reaction_urls = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&').replace( '?limit=10', '?limit=%d' % reactions_per_page)) for x in reaction_urls ] if (reaction_urls) and limit > cur_length: time.sleep(pause()) url = reaction_urls[0] reactions += get_all_reactions(driver, url, reactions_per_page, limit, cur_length) return reactions # Given a story.php page, return a list of (url, display name) def parse_commenters(html): return re.findall( '<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html) # Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name) def parse_likers(html): return re.findall( '<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html) def profile_picture(driver, target_username): url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username) driver.get(url) commenters = parse_commenters(driver.page_source) # given a list of [username, name] returns a list of [id, name, username] def fill_user_ids(driver, users): res = [] c = 0 msg = '[*] Retrieving user ids... ' try: for u in users: c += 1 msg = '%s[*] Retrieving user ids... %d of %d' % ('\r' * len(msg), c, len(users)) print(msg, end='\r') time.sleep(pause()) fbid = get_user_id(driver, u[0]) user = (fbid, u[1], u[0]) res.append(user) except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. Exiting...') return res except Exception as ex: print('[!] Error while retrieving user ids') print(ex) return res return res # given a username, finds the fb user id from the source of the profile page def get_user_id(driver, username): url = 'https://www.facebook.com/%s' % username.replace('/', '') driver.get(url) fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source) if fbid: return fbid[0] else: print('[!] 
Error while getting id of user %s' % username) return -1 def get_username(driver, userid): url = 'https://www.facebook.com/%s' % userid driver.get(url) time.sleep(pause()) return driver.current_url.split('/')[-1].split('?')[0] def parse_args(): parser = argparse.ArgumentParser( description='Find users who interacted with a Facebook profile.') parser.add_argument( '-fu', '--user', metavar='USERNAME', type=str, help='Username of the Facebook account that will be used for scraping') parser.add_argument( '-fp', '--password', metavar='PASSWORD', type=str, help='Password of the Facebook account that will be used for scraping') parser.add_argument( '-t', '--target', metavar='TARGET', type=str, help='Username or numeric id of the target Facebook account') parser.add_argument('-ls', '--limit-stories', metavar='LIMIT', type=int, default=20, help='Max number of stories to analyze') parser.add_argument('-lp', '--limit-photos', metavar='LIMIT', type=int, default=20, help='Max number of photos to analyze') parser.add_argument( '-lr', '--limit-reactions', metavar='LIMIT', default=1000, type=int, help='Max number of reactions to analyze for each story') parser.add_argument( '-lc', '--limit-comments', metavar='LIMIT', default=100, type=int, help='Max number of comments to analyze for each story') parser.add_argument('-o', '--output', metavar='OUTPUTFILE', type=str, help='Specify the name of the pivots output file') parser.add_argument('-csv', '--csv-output', metavar='CSVOUTPUTFILE', type=str, help='Store output file also in CSV format') parser.add_argument( '-q', '--headless', action='store_true', help='Run browser in headless mode. No browser window will be shown.') parser.add_argument('-d', '--driver-path', metavar='EXECUTABLE', type=str, help='Path to geckodriver executable') args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help']) return args def print_statistics(commenters, reactions): print('-' * 78) print(' ' * 34, end=' ') print('STATISTICS') print('-' * 78) print('Most comments:') for u in Counter(commenters).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Most reactions:') for u in Counter(reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Total:') for u in Counter(commenters + reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() def store_csv(users, csv_file_path): print('[*] Storing users in csv file %s' % csv_file_path) with open(csv_file_path, mode='w', newline='', encoding='utf-8') as csv_file: writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['id', 'name', 'url']) for u in users: writer.writerow(u) def store_pivots(users, path): print('[*] Storing users id in file %s' % path) with open(path, 'w') as f: for u in users: f.write('%s\n' % u[0]) def check_file_exists(file): yes = {'yes', 'y', 'ye'} if os.path.isfile(file): print( '[!] Warning: output file %s already exists. Do you want to overwrite? 
[y/N]' % file, end=' ') choice = input().lower() if choice not in yes: sys.exit(0) def main(): print(BANNER) args = parse_args() options = Options() if args.headless: options.add_argument("--headless") driver = webdriver.Firefox(executable_path=args.driver_path, options=options) do_login(driver, args.user, args.password) check_login(driver) if args.target.isdigit(): target_id = args.target target_username = get_username(driver, target_id) else: target_id = get_user_id(driver, args.target) target_username = args.target print('[*] Selected target: %s (%s)' % (target_username, target_id)) urls_to_visit = [] commenters = [] reactions = [] users = [] print('[*] Getting photos links... ', end=" ") photos = get_all_photos(driver, target_id, args.limit_photos)[:args.limit_photos] print('%d photos found' % len(photos)) print('[*] Getting stories links... ', end=" ") stories = get_all_stories(driver, target_id, args.limit_stories)[:args.limit_stories] print('%d stories found' % len(stories)) print( '[*] Retrieving users who have interacted... press Ctrl+C when you have enough' ) msg = '' try: for url in photos + stories: commenters += parse_commenters(driver.page_source) if len(commenters) < args.limit_comments: commenters += get_all_comments(driver, url, limit=args.limit_comments) if len(reactions) < args.limit_reactions: reactions += get_all_reactions(driver, url, limit=args.limit_reactions) users = list(set(reactions).union(set(commenters))) msg = '%sUnique users: %d Comments: %d Reactions: %d' % ( '\r' * len(msg), len(users), len(commenters), len(reactions)) print(msg, end='\r') except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. %d users retrieved' % len(users)) reactions = reactions[:args.limit_reactions] commenters = commenters[:args.limit_comments] users = list(set(reactions).union(set(commenters))) print_statistics(commenters, reactions) users = fill_user_ids(driver, users) if args.output: store_pivots(users, args.output) else: store_pivots(users, '%s-pivots.txt' % target_id) if args.csv_output: store_csv(users, args.csv_output) print('[*] Found %d comments and %d reactions from %d unique users ' % (len(commenters), len(reactions), len(users))) driver.close() if __name__ == '__main__': main()
url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url)
conditional_block
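The conditional_block middle above is the page-advance branch shared by get_all_photos and get_all_stories. A condensed sketch of that pagination pattern, assuming a Selenium-like driver object; follow_pagination is a hypothetical helper, not a function in fint.py.

import re
import time
import random

BASE_URL = 'https://mbasic.facebook.com'

def follow_pagination(driver, start_url, item_pattern, more_patterns, limit=100):
    # Collect matches from each page, then follow the first available
    # "see more"-style link until the limit is hit or no link remains.
    driver.get(start_url)
    items = []
    while len(items) < limit:
        items += re.findall(item_pattern, driver.page_source)
        more = []
        for pattern in more_patterns:
            more = re.findall(pattern, driver.page_source)
            if more:
                break
        if not more:
            break
        time.sleep(round(random.uniform(2, 5), 1))  # polite randomized delay
        driver.get(BASE_URL + more[0].replace('&amp;', '&'))
    return items[:limit]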
fint.py
import re, os, csv, sys, time import json, random, argparse from collections import Counter from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.common.exceptions import TimeoutException, NoSuchElementException VERSION = "0.2" BANNER = """ {0} v. {1} - Fint: Find users who interacted with a profile by sowdust """.format(sys.argv[0], VERSION) BASE_URL = 'https://mbasic.facebook.com' def log(m): return def pause(min=2, max=5): return round(random.uniform(min, max), 1) def do_login(driver, usr, pwd): log('[*] Trying to log in with user %s' % usr) driver.get('https://www.facebook.com') try: elem = driver.find_element_by_xpath('//button[@data-cookiebanner="accept_button"]') elem.click() except Exception as ex: print('[!] Error while accepting cookies:') print(ex) try: elem = driver.find_element_by_id('email') elem.send_keys(usr) elem = driver.find_element_by_id('pass') elem.send_keys(pwd) except NoSuchElementException: elem = driver.find_element_by_name('email') elem.send_keys(usr) elem = driver.find_element_by_name('pass') elem.send_keys(pwd) except Exception as ex: print('[!] Error while logging in:') print(ex) sys.exit(0) elem.send_keys(Keys.RETURN) time.sleep(pause(2, 3)) def check_login(driver): time.sleep(pause(3, 4)) if 'href="/me/"' not in driver.page_source: print('[!] Not logged in. Did you use valid credentials?') print(driver.page_source) sys.exit(0) else: log('[*] Logged in') def get_stories_urls(html, target): # only return stories BY the user user_token = 'id=%s' % target links = re.findall('(/story.php\?story_fbid=[^"#]+)', html) return [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links) if user_token in x ] def get_photos_urls(target_id, html): links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html) return ['%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links)] def get_all_photos(driver, target_id, limit=100): url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id driver.get(url) time.sleep(pause()) see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source) photos = [] if not see_all: return photos else: driver.get(BASE_URL + see_all[0].replace('&amp;', '&')) while len(photos) < limit: photos += get_photos_urls(target_id, driver.page_source) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Photos</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return photos def get_all_stories(driver, target, limit=100): url = 'https://mbasic.facebook.com/%s?v=timeline' % target driver.get(url) stories = [] while len(stories) < limit: stories += get_stories_urls(driver.page_source, target) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Stories</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return stories def get_all_comments(driver, url, limit=200, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") commenters = parse_commenters(html) cur_length += len(commenters) more_comments_url = re.findall( '<div class=".[^"]*" 
id="see_next_[0-9]+"><a href="(.[^"]*)">', html) more_comments_url = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in more_comments_url ] if (more_comments_url) and limit > cur_length: time.sleep(pause()) url = more_comments_url[0] commenters += get_all_comments(driver, url, limit, cur_length=cur_length) return commenters # given a driver on a story.php page, extracts all users who have reacted # takes only 1st level reactions (not consideringr reactions to comments etc.) def get_all_reactions(driver, url, reactions_per_page=999, limit=2000, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") reactions = parse_likers(html) cur_length += len(reactions) reaction_urls = re.findall( '(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)', html) reaction_urls = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&').replace( '?limit=10', '?limit=%d' % reactions_per_page)) for x in reaction_urls ] if (reaction_urls) and limit > cur_length: time.sleep(pause()) url = reaction_urls[0] reactions += get_all_reactions(driver, url, reactions_per_page, limit, cur_length) return reactions # Given a story.php page, return a list of (url, display name) def
(html): return re.findall( '<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html) # Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name) def parse_likers(html): return re.findall( '<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html) def profile_picture(driver, target_username): url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username) driver.get(url) commenters = parse_commenters(driver.page_source) # given a list of [username, name] returns a list of [id, name, username] def fill_user_ids(driver, users): res = [] c = 0 msg = '[*] Retrieving user ids... ' try: for u in users: c += 1 msg = '%s[*] Retrieving user ids... %d of %d' % ('\r' * len(msg), c, len(users)) print(msg, end='\r') time.sleep(pause()) fbid = get_user_id(driver, u[0]) user = (fbid, u[1], u[0]) res.append(user) except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. Exiting...') return res except Exception as ex: print('[!] Error while retrieving user ids') print(ex) return res return res # given a username, finds the fb user id from the source of the profile page def get_user_id(driver, username): url = 'https://www.facebook.com/%s' % username.replace('/', '') driver.get(url) fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source) if fbid: return fbid[0] else: print('[!] Error while getting id of user %s' % username) return -1 def get_username(driver, userid): url = 'https://www.facebook.com/%s' % userid driver.get(url) time.sleep(pause()) return driver.current_url.split('/')[-1].split('?')[0] def parse_args(): parser = argparse.ArgumentParser( description='Find users who interacted with a Facebook profile.') parser.add_argument( '-fu', '--user', metavar='USERNAME', type=str, help='Username of the Facebook account that will be used for scraping') parser.add_argument( '-fp', '--password', metavar='PASSWORD', type=str, help='Password of the Facebook account that will be used for scraping') parser.add_argument( '-t', '--target', metavar='TARGET', type=str, help='Username or numeric id of the target Facebook account') parser.add_argument('-ls', '--limit-stories', metavar='LIMIT', type=int, default=20, help='Max number of stories to analyze') parser.add_argument('-lp', '--limit-photos', metavar='LIMIT', type=int, default=20, help='Max number of photos to analyze') parser.add_argument( '-lr', '--limit-reactions', metavar='LIMIT', default=1000, type=int, help='Max number of reactions to analyze for each story') parser.add_argument( '-lc', '--limit-comments', metavar='LIMIT', default=100, type=int, help='Max number of comments to analyze for each story') parser.add_argument('-o', '--output', metavar='OUTPUTFILE', type=str, help='Specify the name of the pivots output file') parser.add_argument('-csv', '--csv-output', metavar='CSVOUTPUTFILE', type=str, help='Store output file also in CSV format') parser.add_argument( '-q', '--headless', action='store_true', help='Run browser in headless mode. 
No browser window will be shown.') parser.add_argument('-d', '--driver-path', metavar='EXECUTABLE', type=str, help='Path to geckodriver executable') args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help']) return args def print_statistics(commenters, reactions): print('-' * 78) print(' ' * 34, end=' ') print('STATISTICS') print('-' * 78) print('Most comments:') for u in Counter(commenters).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Most reactions:') for u in Counter(reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Total:') for u in Counter(commenters + reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() def store_csv(users, csv_file_path): print('[*] Storing users in csv file %s' % csv_file_path) with open(csv_file_path, mode='w', newline='', encoding='utf-8') as csv_file: writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['id', 'name', 'url']) for u in users: writer.writerow(u) def store_pivots(users, path): print('[*] Storing users id in file %s' % path) with open(path, 'w') as f: for u in users: f.write('%s\n' % u[0]) def check_file_exists(file): yes = {'yes', 'y', 'ye'} if os.path.isfile(file): print( '[!] Warning: output file %s already exists. Do you want to overwrite? [y/N]' % file, end=' ') choice = input().lower() if choice not in yes: sys.exit(0) def main(): print(BANNER) args = parse_args() options = Options() if args.headless: options.add_argument("--headless") driver = webdriver.Firefox(executable_path=args.driver_path, options=options) do_login(driver, args.user, args.password) check_login(driver) if args.target.isdigit(): target_id = args.target target_username = get_username(driver, target_id) else: target_id = get_user_id(driver, args.target) target_username = args.target print('[*] Selected target: %s (%s)' % (target_username, target_id)) urls_to_visit = [] commenters = [] reactions = [] users = [] print('[*] Getting photos links... ', end=" ") photos = get_all_photos(driver, target_id, args.limit_photos)[:args.limit_photos] print('%d photos found' % len(photos)) print('[*] Getting stories links... ', end=" ") stories = get_all_stories(driver, target_id, args.limit_stories)[:args.limit_stories] print('%d stories found' % len(stories)) print( '[*] Retrieving users who have interacted... press Ctrl+C when you have enough' ) msg = '' try: for url in photos + stories: commenters += parse_commenters(driver.page_source) if len(commenters) < args.limit_comments: commenters += get_all_comments(driver, url, limit=args.limit_comments) if len(reactions) < args.limit_reactions: reactions += get_all_reactions(driver, url, limit=args.limit_reactions) users = list(set(reactions).union(set(commenters))) msg = '%sUnique users: %d Comments: %d Reactions: %d' % ( '\r' * len(msg), len(users), len(commenters), len(reactions)) print(msg, end='\r') except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. 
%d users retrieved' % len(users)) reactions = reactions[:args.limit_reactions] commenters = commenters[:args.limit_comments] users = list(set(reactions).union(set(commenters))) print_statistics(commenters, reactions) users = fill_user_ids(driver, users) if args.output: store_pivots(users, args.output) else: store_pivots(users, '%s-pivots.txt' % target_id) if args.csv_output: store_csv(users, args.csv_output) print('[*] Found %d comments and %d reactions from %d unique users ' % (len(commenters), len(reactions), len(users))) driver.close() if __name__ == '__main__': main()
parse_commenters
identifier_name
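The identifier_name middle above is the function name parse_commenters. A quick check of its regex against an illustrative, hand-written snippet (not real Facebook markup):

import re

def parse_commenters(html):
    return re.findall(
        r'<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html)

sample = '<h3><a class="x" href="/some.user?rc=p&amp;refid=18">Some User</a></h3>'
print(parse_commenters(sample))  # [('/some.user', 'Some User')]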
fint.py
import re, os, csv, sys, time import json, random, argparse from collections import Counter from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.firefox.options import Options from selenium.common.exceptions import TimeoutException, NoSuchElementException VERSION = "0.2" BANNER = """ {0} v. {1} - Fint: Find users who interacted with a profile by sowdust """.format(sys.argv[0], VERSION) BASE_URL = 'https://mbasic.facebook.com' def log(m): return def pause(min=2, max=5): return round(random.uniform(min, max), 1) def do_login(driver, usr, pwd): log('[*] Trying to log in with user %s' % usr) driver.get('https://www.facebook.com') try: elem = driver.find_element_by_xpath('//button[@data-cookiebanner="accept_button"]') elem.click() except Exception as ex: print('[!] Error while accepting cookies:') print(ex) try: elem = driver.find_element_by_id('email') elem.send_keys(usr) elem = driver.find_element_by_id('pass') elem.send_keys(pwd) except NoSuchElementException: elem = driver.find_element_by_name('email') elem.send_keys(usr) elem = driver.find_element_by_name('pass') elem.send_keys(pwd) except Exception as ex: print('[!] Error while logging in:') print(ex) sys.exit(0) elem.send_keys(Keys.RETURN) time.sleep(pause(2, 3)) def check_login(driver): time.sleep(pause(3, 4)) if 'href="/me/"' not in driver.page_source: print('[!] Not logged in. Did you use valid credentials?') print(driver.page_source) sys.exit(0) else: log('[*] Logged in') def get_stories_urls(html, target): # only return stories BY the user user_token = 'id=%s' % target links = re.findall('(/story.php\?story_fbid=[^"#]+)', html) return [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links) if user_token in x ] def get_photos_urls(target_id, html): links = re.findall('(/photo\.php\?[^;]*;id=%s[^"]+)' % target_id, html) return ['%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in set(links)] def get_all_photos(driver, target_id, limit=100): url = 'https://mbasic.facebook.com/profile.php?id=%s&v=photos' % target_id driver.get(url) time.sleep(pause()) see_all = re.findall('<a href="([^"#]+)">See All</a>', driver.page_source) photos = [] if not see_all: return photos else: driver.get(BASE_URL + see_all[0].replace('&amp;', '&')) while len(photos) < limit: photos += get_photos_urls(target_id, driver.page_source) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Photos</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return photos def get_all_stories(driver, target, limit=100): url = 'https://mbasic.facebook.com/%s?v=timeline' % target driver.get(url) stories = [] while len(stories) < limit: stories += get_stories_urls(driver.page_source, target) see_more = re.findall( '<a href="(.[^"#]*)"><span>See More Stories</span></a>', driver.page_source) if not see_more: see_more = re.findall('<a href="(.[^"#]*)">Show more</a>', driver.page_source) if see_more: url = BASE_URL + see_more[0].replace('&amp;', '&') time.sleep(pause()) driver.get(url) else: break return stories def get_all_comments(driver, url, limit=200, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") commenters = parse_commenters(html) cur_length += len(commenters) more_comments_url = re.findall( '<div class=".[^"]*" 
id="see_next_[0-9]+"><a href="(.[^"]*)">', html) more_comments_url = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&')) for x in more_comments_url ] if (more_comments_url) and limit > cur_length: time.sleep(pause()) url = more_comments_url[0] commenters += get_all_comments(driver, url, limit, cur_length=cur_length) return commenters # given a driver on a story.php page, extracts all users who have reacted # takes only 1st level reactions (not consideringr reactions to comments etc.) def get_all_reactions(driver, url, reactions_per_page=999, limit=2000, cur_length=0): if cur_length >= limit: return [] driver.get(url) html = driver.page_source.encode("utf-8", errors='replace').decode("utf-8", errors="replace") reactions = parse_likers(html) cur_length += len(reactions) reaction_urls = re.findall( '(/ufi/reaction/profile/browser/(?!.*(?:reaction_type|total_count=0)).[^"]*)', html) reaction_urls = [ '%s%s' % (BASE_URL, x.replace('&amp;', '&').replace( '?limit=10', '?limit=%d' % reactions_per_page)) for x in reaction_urls ] if (reaction_urls) and limit > cur_length: time.sleep(pause()) url = reaction_urls[0] reactions += get_all_reactions(driver, url, reactions_per_page, limit, cur_length) return reactions # Given a story.php page, return a list of (url, display name) def parse_commenters(html): return re.findall( '<h3><a class="[^"]+" href="([^"]+)\?r[^"]*">([^<]*)</a>', html) # Given a "reactions" page ufi/reaction/profile/browser/, returns a list of (url, display name) def parse_likers(html): return re.findall( '<h3 class=".[^"]*"><a href="(.[^"]*)[^"]*">(.[^<]*)</a></h3>', html) def profile_picture(driver, target_username): url = '%sphoto.php?fbid=%s' % (BASE_URL, target_username) driver.get(url) commenters = parse_commenters(driver.page_source) # given a list of [username, name] returns a list of [id, name, username] def fill_user_ids(driver, users): res = [] c = 0 msg = '[*] Retreiving user ids... ' try: for u in users: c += 1 msg = '%s[*] Retreiving user ids... %d of %d' % ('\r' * len(msg), c, len(users)) print(msg, end='\r') time.sleep(pause()) fbid = get_user_id(driver, u[0]) user = (fbid, u[1], u[0]) res.append(user) except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. Exiting...') return res except Exception as ex: print('[!] Error while retrieving user ids') print(ex) return res return res # given a username, finds the fb user id from the source of the profile page def get_user_id(driver, username): url = 'https://www.facebook.com/%s' % username.replace('/', '') driver.get(url) fbid = re.findall('"scale":1,"userID":"([0-9]+)"}', driver.page_source) if fbid: return fbid[0] else: print('[!] 
Error while getting id of user %s' % username) return -1 def get_username(driver, userid): url = 'https://www.facebook.com/%s' % userid driver.get(url) time.sleep(pause()) return driver.current_url.split('/')[-1].split('?')[0] def parse_args(): parser = argparse.ArgumentParser( description='Find users who interacted with a Facebook profile.') parser.add_argument( '-fu', '--user', metavar='USERNAME', type=str, help='Username of the Facebook account that will be used for scraping') parser.add_argument( '-fp', '--password', metavar='PASSWORD', type=str, help='Password of the Facebook account that will be used for scraping') parser.add_argument( '-t', '--target', metavar='TARGET', type=str, help='Username or numeric id of the target Facebook account') parser.add_argument('-ls', '--limit-stories', metavar='LIMIT', type=int, default=20, help='Max number of stories to analyze') parser.add_argument('-lp', '--limit-photos', metavar='LIMIT', type=int, default=20, help='Max number of photos to analyze') parser.add_argument( '-lr', '--limit-reactions', metavar='LIMIT', default=1000, type=int, help='Max number of reactions to analyze for each story') parser.add_argument( '-lc', '--limit-comments', metavar='LIMIT', default=100, type=int, help='Max number of comments to analyze for each story') parser.add_argument('-o', '--output', metavar='OUTPUTFILE', type=str, help='Specify the name of the pivots output file') parser.add_argument('-csv', '--csv-output', metavar='CSVOUTPUTFILE', type=str, help='Store output file also in CSV format') parser.add_argument( '-q', '--headless', action='store_true', help='Run browser in headless mode. No browser window will be shown.') parser.add_argument('-d', '--driver-path', metavar='EXECUTABLE', type=str, help='Path to geckodriver executable') args = parser.parse_args(args=None if len(sys.argv) > 1 else ['--help']) return args def print_statistics(commenters, reactions): print('-' * 78) print(' ' * 34, end=' ') print('STATISTICS') print('-' * 78) print('Most comments:') for u in Counter(commenters).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Most reactions:') for u in Counter(reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() print('Total:') for u in Counter(commenters + reactions).most_common(): print('[%d]\t%s (%s)' % (u[1], u[0][1], u[0][0])) print() def store_csv(users, csv_file_path): print('[*] Storing users in csv file %s' % csv_file_path) with open(csv_file_path, mode='w', newline='', encoding='utf-8') as csv_file: writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) writer.writerow(['id', 'name', 'url']) for u in users: writer.writerow(u) def store_pivots(users, path): print('[*] Storing users id in file %s' % path) with open(path, 'w') as f: for u in users: f.write('%s\n' % u[0]) def check_file_exists(file): yes = {'yes', 'y', 'ye'} if os.path.isfile(file): print( '[!] Warning: output file %s already exists. Do you want to overwrite? 
[y/N]' % file, end=' ') choice = input().lower() if choice not in yes: sys.exit(0) def main(): print(BANNER) args = parse_args() options = Options() if args.headless: options.add_argument("--headless") driver = webdriver.Firefox(executable_path=args.driver_path, options=options) do_login(driver, args.user, args.password) check_login(driver) if args.target.isdigit(): target_id = args.target target_username = get_username(driver, target_id) else: target_id = get_user_id(driver, args.target) target_username = args.target print('[*] Selected target: %s (%s)' % (target_username, target_id)) urls_to_visit = [] commenters = [] reactions = [] users = [] print('[*] Getting photos links... ', end=" ") photos = get_all_photos(driver, target_id, args.limit_photos)[:args.limit_photos] print('%d photos found' % len(photos)) print('[*] Getting stories links... ', end=" ") stories = get_all_stories(driver, target_id, args.limit_stories)[:args.limit_stories] print('%d stories found' % len(stories)) print( '[*] Retrieving users who have interacted... press Ctrl+C when you have enough' ) msg = '' try: for url in photos + stories: commenters += parse_commenters(driver.page_source) if len(commenters) < args.limit_comments: commenters += get_all_comments(driver, url, limit=args.limit_comments) if len(reactions) < args.limit_reactions: reactions += get_all_reactions(driver, url, limit=args.limit_reactions) users = list(set(reactions).union(set(commenters))) msg = '%sUnique users: %d Comments: %d Reactions: %d' % ( '\r' * len(msg), len(users), len(commenters), len(reactions))
reactions = reactions[:args.limit_reactions] commenters = commenters[:args.limit_comments] users = list(set(reactions).union(set(commenters))) print_statistics(commenters, reactions) users = fill_user_ids(driver, users) if args.output: store_pivots(users, args.output) else: store_pivots(users, '%s-pivots.txt' % target_id) if args.csv_output: store_csv(users, args.csv_output) print('[*] Found %d comments and %d reactions from %d unique users ' % (len(commenters), len(reactions), len(users))) driver.close() if __name__ == '__main__': main()
print(msg, end='\r') except (KeyboardInterrupt, SystemExit): print('[!] KeyboardInterrupt received. %d users retrieved' % len(users))
random_line_split
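print_statistics in the record above ranks interacting users with collections.Counter.most_common. The same idea in isolation, with made-up data:

from collections import Counter

commenters = [('/alice', 'Alice'), ('/bob', 'Bob'), ('/alice', 'Alice')]
for (url, name), count in Counter(commenters).most_common():
    print('[%d]\t%s (%s)' % (count, name, url))
# [2]    Alice (/alice)
# [1]    Bob (/bob)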
headtail_resolution.py
""" This module contains the logic to resolve the head-tail orientation of a predicted video time series. """ import logging import numpy as np import numpy.ma as ma from wormpose.pose.distance_metrics import angle_distance, skeleton_distance from wormpose.pose.results_datatypes import ( BaseResults, ShuffledResults, OriginalResults, ) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) # threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30) # we consider frames to be part of the same segment if they are maximum this amount of seconds apart # (and satisfy the distance threshold) CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2 # discard too small segments less than this amount of seconds MIN_SEGMENT_SIZE_SEC = 0.2 # don't align isolated segments that are more than this amount of seconds apart from aligned segments MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1 def _init_partitioned_series(shuffled_series: np.ndarray): return ma.masked_all_like(shuffled_series) def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int): partitioned_series[frame_index][0] = shuffled_series[frame_index, partition] partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition] class _PartitionedResults(BaseResults): def __init__(self, shuffled_results: ShuffledResults): self.cur_partition = -1 self.partitions = ma.masked_all((len(shuffled_results),), dtype=int) self._shuffled_results = shuffled_results theta = _init_partitioned_series(shuffled_results.theta) skeletons = _init_partitioned_series(shuffled_results.skeletons) scores = _init_partitioned_series(shuffled_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True self.partitions.mask[indices] = True def set_partition(self, frame_index: int, partition: int, new_partition: bool = False): if new_partition: self.cur_partition += 1 _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition) _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition) _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition) self.partitions[frame_index] = self.cur_partition def _get_partition_indices(self, partition_index: int): return np.where(self.partitions == partition_index)[0] def get_segments(self): all_partitions_indexes = np.unique(self.partitions.filled(-1)) return [ self._get_partition_indices(partition_index) for partition_index in all_partitions_indexes if partition_index >= 0 ] class _ResolvedResults(BaseResults): def __init__(self, partitioned_results: _PartitionedResults): self._partitioned_results = partitioned_results theta = _init_unified_series(partitioned_results.theta) skeletons = _init_unified_series(partitioned_results.skeletons) scores = _init_unified_series(partitioned_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def resolve(self, segment, segment_alignment): self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment] self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment] self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment] def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True 
self.scores.mask[indices] = True def num_valid(self): return np.sum(~self.scores.mask) class _FinalResults(BaseResults): @classmethod def from_resolved(cls, resolved_results: _ResolvedResults): return _FinalResults( theta=resolved_results.theta.filled(np.nan), skeletons=resolved_results.skeletons.filled(np.nan), scores=resolved_results.scores.filled(np.nan), ) @classmethod def from_shuffled(cls, shuffled_results: ShuffledResults): return _FinalResults( theta=np.full_like(shuffled_results.theta[:, 0], np.nan), skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan), scores=np.full_like(shuffled_results.scores[:, 0], np.nan), ) def _make_continuous_partitions( shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float ) -> _PartitionedResults: time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC)) min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC)) partitioned_results = _PartitionedResults(shuffled_results) # discard low score frames early (use the maximum value of both scores for now) good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0] for frame_index in good_score_frames: prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0] # if there is a big gap > time_window we start a new partition, with a random value (0) if np.all(np.any(prev_theta.mask, axis=1)): partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True) # otherwise we look within the time_window just past the closest non-nan frame to see if we can continue the # partition as long as the values stay continuous else: last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1] dists = [ angle_distance( shuffled_results.theta[frame_index, k, :], prev_theta[last_valid_index], ) for k in range(2) ] partition = int(np.argmin(dists)) if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD: partitioned_results.set_partition(frame_index=frame_index, partition=partition) # discard short segments for cur_partition_indices in partitioned_results.get_segments(): if len(cur_partition_indices) < min_segment_size: partitioned_results.mask(cur_partition_indices) return partitioned_results def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5): """ Match the head/tail alignment with the results of the classical tracking in each of the segments, if there is enough labelled data in the segment """ segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8) for segment_index, segment in enumerate(segments):
return segments_alignment def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment): # evaluate how far away this segment is from known values score = np.nan segment_offset = np.nan if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]: gap = segments[segment_index][0] - segments[segment_index - 1][-1] score = gap segment_offset = -1 if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]: gap = segments[segment_index + 1][0] - segments[segment_index][-1] if np.isnan(score) or gap < score: score = gap segment_offset = 1 return score, segment_offset def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float): """ Resolve the unaligned segments by comparing with adjacent segments, starting with the segments that have the smallest frame gap to an adjacent trusted segment Don't align isolated segments that have a big gap to any trusted segment """ maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC)) # abort if no segments have been aligned at all: there is no trusted segment to anchor the resolution if np.all(segments_alignment.mask): logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.") return segments_alignment # prioritize the segments that have known adjacent frames with a small gap # until all segments are aligned except the isolated ones (farther than maximum_gap_allowed) unaligned = np.where(segments_alignment.mask)[0] while len(unaligned) > 0: # we first pick the best candidate segment to align (there are known frames nearby before or after or both) all_gaps = [ _calculate_smallest_gap_to_adjacent( segment_index=x, segments=segments, segments_alignment=segments_alignment, ) for x in unaligned ] segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0] gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index] # abort if only isolated segments are left if gap_to_adjacent_segment > maximum_gap_allowed: break cur_segment_index = unaligned[segment_to_fix_index] cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]] adjacent_segment_index = cur_segment_index + adjacent_segment_offset adjacent_alignment = segments_alignment[adjacent_segment_index] adjacent_segment = segments[adjacent_segment_index] adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment] if adjacent_segment_offset == -1: closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment elif adjacent_segment_offset == 1: closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment else: raise ValueError() dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton] segments_alignment[cur_segment_index] = int(np.argmax(dists)) unaligned = np.where(segments_alignment.mask)[0] return segments_alignment def _init_unified_series(mixed_series): return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype) def resolve_head_tail( shuffled_results: ShuffledResults, original_results: OriginalResults, frame_rate: float, score_threshold, ) -> BaseResults: len_series = len(shuffled_results) # Create continuous segments without jumps partitioned_results = 
_make_continuous_partitions( score_threshold=score_threshold, frame_rate=frame_rate, shuffled_results=shuffled_results, ) segments = partitioned_results.get_segments() if len(segments) == 0: logger.error( f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}," f" stopping analysis." ) return _FinalResults.from_shuffled(shuffled_results) # Choose each segment global alignment by comparing with labelled data segments_alignment = _align_segments_with_labels( segments, partitioned_results.skeletons, original_results.skeletons ) # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively segments_alignment = _align_unlabelled_segments_with_adjacents( segments, segments_alignment, partitioned_results.skeletons, frame_rate ) # Compile results resolved_results = _ResolvedResults(partitioned_results) for segment, segment_alignment in zip(segments, segments_alignment): if not ma.is_masked(segment_alignment): resolved_results.resolve(segment, segment_alignment) # Filter the final results again by score threshold low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0] resolved_results.mask(low_scores_indices) num_success = resolved_results.num_valid() original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum() logger.info( f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully " f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}" f" or {(float(original_num_success) / len_series * 100):.1f}% of total)" ) if num_success < original_num_success: logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!") return _FinalResults.from_resolved(resolved_results)
segment_skeletons = labelled_skeletons[segment] non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2)) labels_count = np.sum(non_nan_labelled) non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3)) to_compare = np.logical_and(non_nan_labelled, non_masked) similarity_scores = [] for label_skel, partitioned_skeleton in zip( segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare] ): dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton] similarity_scores.append(dists) if len(similarity_scores) > 0: mean_similarity_scores = np.mean(similarity_scores, axis=0) if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled: segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
conditional_block
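The conditional_block middle above fills segments_alignment, a masked array in which still-unresolved segments stay masked. A toy illustration of that bookkeeping and of the frame-gap measure used by _calculate_smallest_gap_to_adjacent; the segment index ranges are invented.

import numpy as np
import numpy.ma as ma

segments = [np.arange(0, 10), np.arange(13, 20), np.arange(60, 70)]
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
segments_alignment[0] = 1  # pretend labelled data resolved segment 0 only

# Gap in frames between segment 1 and its aligned neighbour, segment 0:
gap = segments[1][0] - segments[0][-1]
print(gap)                      # 4 -> small enough to chain alignments
print(segments_alignment.mask)  # [False  True  True]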
headtail_resolution.py
""" This module contains the logic to resolve the head-tail orientation of a predicted video time series. """ import logging import numpy as np import numpy.ma as ma from wormpose.pose.distance_metrics import angle_distance, skeleton_distance from wormpose.pose.results_datatypes import ( BaseResults, ShuffledResults, OriginalResults, ) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) # threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30) # we consider frames to be part of the same segment if they are maximum this amount of seconds apart # (and satisfy the distance threshold) CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2 # discard too small segments less than this amount of seconds MIN_SEGMENT_SIZE_SEC = 0.2 # don't align isolated segments that are more than this amount of seconds apart from aligned segments MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1 def _init_partitioned_series(shuffled_series: np.ndarray): return ma.masked_all_like(shuffled_series) def
(partitioned_series, shuffled_series, frame_index: int, partition: int): partitioned_series[frame_index][0] = shuffled_series[frame_index, partition] partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition] class _PartitionedResults(BaseResults): def __init__(self, shuffled_results: ShuffledResults): self.cur_partition = -1 self.partitions = ma.masked_all((len(shuffled_results),), dtype=int) self._shuffled_results = shuffled_results theta = _init_partitioned_series(shuffled_results.theta) skeletons = _init_partitioned_series(shuffled_results.skeletons) scores = _init_partitioned_series(shuffled_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True self.partitions.mask[indices] = True def set_partition(self, frame_index: int, partition: int, new_partition: bool = False): if new_partition: self.cur_partition += 1 _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition) _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition) _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition) self.partitions[frame_index] = self.cur_partition def _get_partition_indices(self, partition_index: int): return np.where(self.partitions == partition_index)[0] def get_segments(self): all_partitions_indexes = np.unique(self.partitions.filled(-1)) return [ self._get_partition_indices(partition_index) for partition_index in all_partitions_indexes if partition_index >= 0 ] class _ResolvedResults(BaseResults): def __init__(self, partitioned_results: _PartitionedResults): self._partitioned_results = partitioned_results theta = _init_unified_series(partitioned_results.theta) skeletons = _init_unified_series(partitioned_results.skeletons) scores = _init_unified_series(partitioned_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def resolve(self, segment, segment_alignment): self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment] self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment] self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment] def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True def num_valid(self): return np.sum(~self.scores.mask) class _FinalResults(BaseResults): @classmethod def from_resolved(cls, resolved_results: _ResolvedResults): return _FinalResults( theta=resolved_results.theta.filled(np.nan), skeletons=resolved_results.skeletons.filled(np.nan), scores=resolved_results.scores.filled(np.nan), ) @classmethod def from_shuffled(cls, shuffled_results: ShuffledResults): return _FinalResults( theta=np.full_like(shuffled_results.theta[:, 0], np.nan), skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan), scores=np.full_like(shuffled_results.scores[:, 0], np.nan), ) def _make_continuous_partitions( shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float ) -> _PartitionedResults: time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC)) min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC)) partitioned_results = _PartitionedResults(shuffled_results) # discard low score frames early (use the maximum value of both scores for now) good_score_frames =
np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0] for frame_index in good_score_frames: prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0] # if there is a big gap > time_window we start a new partition, with a random value (0) if np.all(np.any(prev_theta.mask, axis=1)): partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True) # otherwise we look in the time_window close past the closest non nan frame see if we can continue the # partition as long as the values stay continuous else: last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1] dists = [ angle_distance( shuffled_results.theta[frame_index, k, :], prev_theta[last_valid_index], ) for k in range(2) ] partition = int(np.argmin(dists)) if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD: partitioned_results.set_partition(frame_index=frame_index, partition=partition) # discard short segments for cur_partition_indices in partitioned_results.get_segments(): if len(cur_partition_indices) < min_segment_size: partitioned_results.mask(cur_partition_indices) return partitioned_results def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5): """ Match the head/tail alignment with the results of the classical tracking in each of the segments, if there is enough labelled data in the segment """ segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8) for segment_index, segment in enumerate(segments): segment_skeletons = labelled_skeletons[segment] non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2)) labels_count = np.sum(non_nan_labelled) non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3)) to_compare = np.logical_and(non_nan_labelled, non_masked) similarity_scores = [] for label_skel, partitioned_skeleton in zip( segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare] ): dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton] similarity_scores.append(dists) if len(similarity_scores) > 0: mean_similarity_scores = np.mean(similarity_scores, axis=0) if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled: segments_alignment[segment_index] = np.argmax(mean_similarity_scores) return segments_alignment def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment): # evaluate how far away this segment is from known values score = np.nan segment_offset = np.nan if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]: gap = segments[segment_index][0] - segments[segment_index - 1][-1] score = gap segment_offset = -1 if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]: gap = segments[segment_index + 1][0] - segments[segment_index][-1] if np.isnan(score) or gap < score: score = gap segment_offset = 1 return score, segment_offset def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float): """ Resolve the unaligned segments by comparing with adjacent segments, starting with the segments that have the least frames gap between an adjacent trusted segment Don't align isolated segments which a big gap between trusted segments """ maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC)) # ensure that if no segments have been aligned at all, pick one solution randomly to start if 
np.all(segments_alignment.mask): logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.") return segments_alignment # fix in priority the segments with known adjacent frames with little gap # until all segments are aligned except the isolated ones (further than maximum_gap_allowed) unaligned = np.where(segments_alignment.mask)[0] while len(unaligned) > 0: # we first pick the best candidate segment to align (there are known frames nearby before or after or both) all_gaps = [ _calculate_smallest_gap_to_adjacent( segment_index=x, segments=segments, segments_alignment=segments_alignment, ) for x in unaligned ] segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0] gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index] # abort if only isolated segments are left if gap_to_adjacent_segment > maximum_gap_allowed: break cur_segment_index = unaligned[segment_to_fix_index] cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]] adjacent_segment_index = cur_segment_index + adjacent_segment_offset adjacent_alignment = segments_alignment[adjacent_segment_index] adjacent_segment = segments[adjacent_segment_index] adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment] if adjacent_segment_offset == -1: closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment elif adjacent_segment_offset == 1: closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment else: raise ValueError() dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton] segments_alignment[cur_segment_index] = int(np.argmax(dists)) unaligned = np.where(segments_alignment.mask)[0] return segments_alignment def _init_unified_series(mixed_series): return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype) def resolve_head_tail( shuffled_results: ShuffledResults, original_results: OriginalResults, frame_rate: float, score_threshold, ) -> BaseResults: len_series = len(shuffled_results) # Create continuous segments without jumps partitioned_results = _make_continuous_partitions( score_threshold=score_threshold, frame_rate=frame_rate, shuffled_results=shuffled_results, ) segments = partitioned_results.get_segments() if len(segments) == 0: logger.error( f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}," f" stopping analysis." 
) return _FinalResults.from_shuffled(shuffled_results) # Choose each segment global alignment by comparing with labelled data segments_alignment = _align_segments_with_labels( segments, partitioned_results.skeletons, original_results.skeletons ) # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively segments_alignment = _align_unlabelled_segments_with_adjacents( segments, segments_alignment, partitioned_results.skeletons, frame_rate ) # Compile results resolved_results = _ResolvedResults(partitioned_results) for segment, segment_alignment in zip(segments, segments_alignment): if not ma.is_masked(segment_alignment): resolved_results.resolve(segment, segment_alignment) # Filter the final results again by score threshold low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0] resolved_results.mask(low_scores_indices) num_success = resolved_results.num_valid() original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum() logger.info( f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully " f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}" f" or {(float(original_num_success) / len_series * 100):.1f}% of total)" ) if num_success < original_num_success: logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!") return _FinalResults.from_resolved(resolved_results)
_set_partition
identifier_name
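The partition choice wired through _set_partition above is driven by angle continuity: for each good frame, the solution whose theta is closest to the last valid frame continues the segment. A toy sketch of that decision; angle_dist below is a simplified stand-in for wormpose's angle_distance, not the real implementation:

import numpy as np

def angle_dist(a, b):
    # mean absolute angular difference, wrapped to [-pi, pi]
    return np.mean(np.abs(np.arctan2(np.sin(a - b), np.cos(a - b))))

prev_theta = np.array([0.10, 0.20, 0.30])   # last valid frame of the segment
candidates = np.array([[0.12, 0.21, 0.33],  # solution 0: nearly identical
                       [3.26, 3.35, 3.44]]) # solution 1: flipped by ~pi
dists = [angle_dist(c, prev_theta) for c in candidates]
partition = int(np.argmin(dists))
if dists[partition] < np.deg2rad(30):       # the continuity threshold
    print("segment continues with solution", partition)  # prints 0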
headtail_resolution.py
""" This module contains the logic to resolve the head-tail orientation of a predicted video time series. """ import logging import numpy as np import numpy.ma as ma from wormpose.pose.distance_metrics import angle_distance, skeleton_distance from wormpose.pose.results_datatypes import ( BaseResults, ShuffledResults, OriginalResults, ) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) # threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30) # we consider frames to be part of the same segment if they are maximum this amount of seconds apart # (and satisfy the distance threshold) CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2 # discard too small segments less than this amount of seconds MIN_SEGMENT_SIZE_SEC = 0.2 # don't align isolated segments that are more than this amount of seconds apart from aligned segments MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1 def _init_partitioned_series(shuffled_series: np.ndarray): return ma.masked_all_like(shuffled_series) def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int): partitioned_series[frame_index][0] = shuffled_series[frame_index, partition] partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition] class _PartitionedResults(BaseResults): def __init__(self, shuffled_results: ShuffledResults): self.cur_partition = -1 self.partitions = ma.masked_all((len(shuffled_results),), dtype=int) self._shuffled_results = shuffled_results theta = _init_partitioned_series(shuffled_results.theta) skeletons = _init_partitioned_series(shuffled_results.skeletons) scores = _init_partitioned_series(shuffled_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True self.partitions.mask[indices] = True def set_partition(self, frame_index: int, partition: int, new_partition: bool = False): if new_partition: self.cur_partition += 1 _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition) _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition) _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition) self.partitions[frame_index] = self.cur_partition def _get_partition_indices(self, partition_index: int): return np.where(self.partitions == partition_index)[0] def get_segments(self): all_partitions_indexes = np.unique(self.partitions.filled(-1)) return [ self._get_partition_indices(partition_index) for partition_index in all_partitions_indexes if partition_index >= 0 ] class _ResolvedResults(BaseResults): def __init__(self, partitioned_results: _PartitionedResults): self._partitioned_results = partitioned_results theta = _init_unified_series(partitioned_results.theta) skeletons = _init_unified_series(partitioned_results.skeletons) scores = _init_unified_series(partitioned_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def resolve(self, segment, segment_alignment):
def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True def num_valid(self): return np.sum(~self.scores.mask) class _FinalResults(BaseResults): @classmethod def from_resolved(cls, resolved_results: _ResolvedResults): return _FinalResults( theta=resolved_results.theta.filled(np.nan), skeletons=resolved_results.skeletons.filled(np.nan), scores=resolved_results.scores.filled(np.nan), ) @classmethod def from_shuffled(cls, shuffled_results: ShuffledResults): return _FinalResults( theta=np.full_like(shuffled_results.theta[:, 0], np.nan), skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan), scores=np.full_like(shuffled_results.scores[:, 0], np.nan), ) def _make_continuous_partitions( shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float ) -> _PartitionedResults: time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC)) min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC)) partitioned_results = _PartitionedResults(shuffled_results) # discard low score frames early (use the maximum value of both scores for now) good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0] for frame_index in good_score_frames: prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0] # if there is a big gap > time_window we start a new partition, with a random value (0) if np.all(np.any(prev_theta.mask, axis=1)): partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True) # otherwise we look in the time_window close past the closest non nan frame see if we can continue the # partition as long as the values stay continuous else: last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1] dists = [ angle_distance( shuffled_results.theta[frame_index, k, :], prev_theta[last_valid_index], ) for k in range(2) ] partition = int(np.argmin(dists)) if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD: partitioned_results.set_partition(frame_index=frame_index, partition=partition) # discard short segments for cur_partition_indices in partitioned_results.get_segments(): if len(cur_partition_indices) < min_segment_size: partitioned_results.mask(cur_partition_indices) return partitioned_results def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5): """ Match the head/tail alignment with the results of the classical tracking in each of the segments, if there is enough labelled data in the segment """ segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8) for segment_index, segment in enumerate(segments): segment_skeletons = labelled_skeletons[segment] non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2)) labels_count = np.sum(non_nan_labelled) non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3)) to_compare = np.logical_and(non_nan_labelled, non_masked) similarity_scores = [] for label_skel, partitioned_skeleton in zip( segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare] ): dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton] similarity_scores.append(dists) if len(similarity_scores) > 0: mean_similarity_scores = np.mean(similarity_scores, axis=0) if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled: segments_alignment[segment_index] = np.argmax(mean_similarity_scores) return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment): # evaluate how far away this segment is from known values score = np.nan segment_offset = np.nan if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]: gap = segments[segment_index][0] - segments[segment_index - 1][-1] score = gap segment_offset = -1 if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]: gap = segments[segment_index + 1][0] - segments[segment_index][-1] if np.isnan(score) or gap < score: score = gap segment_offset = 1 return score, segment_offset def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float): """ Resolve the unaligned segments by comparing with adjacent segments, starting with the segments that have the least frames gap between an adjacent trusted segment Don't align isolated segments which a big gap between trusted segments """ maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC)) # ensure that if no segments have been aligned at all, pick one solution randomly to start if np.all(segments_alignment.mask): logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.") return segments_alignment # fix in priority the segments with known adjacent frames with little gap # until all segments are aligned except the isolated ones (further than maximum_gap_allowed) unaligned = np.where(segments_alignment.mask)[0] while len(unaligned) > 0: # we first pick the best candidate segment to align (there are known frames nearby before or after or both) all_gaps = [ _calculate_smallest_gap_to_adjacent( segment_index=x, segments=segments, segments_alignment=segments_alignment, ) for x in unaligned ] segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0] gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index] # abort if only isolated segments are left if gap_to_adjacent_segment > maximum_gap_allowed: break cur_segment_index = unaligned[segment_to_fix_index] cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]] adjacent_segment_index = cur_segment_index + adjacent_segment_offset adjacent_alignment = segments_alignment[adjacent_segment_index] adjacent_segment = segments[adjacent_segment_index] adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment] if adjacent_segment_offset == -1: closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment elif adjacent_segment_offset == 1: closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment else: raise ValueError() dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton] segments_alignment[cur_segment_index] = int(np.argmax(dists)) unaligned = np.where(segments_alignment.mask)[0] return segments_alignment def _init_unified_series(mixed_series): return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype) def resolve_head_tail( shuffled_results: ShuffledResults, original_results: OriginalResults, frame_rate: float, score_threshold, ) -> BaseResults: len_series = len(shuffled_results) # Create continuous segments without jumps partitioned_results = _make_continuous_partitions( 
score_threshold=score_threshold, frame_rate=frame_rate, shuffled_results=shuffled_results, ) segments = partitioned_results.get_segments() if len(segments) == 0: logger.error( f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}," f" stopping analysis." ) return _FinalResults.from_shuffled(shuffled_results) # Choose each segment global alignment by comparing with labelled data segments_alignment = _align_segments_with_labels( segments, partitioned_results.skeletons, original_results.skeletons ) # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively segments_alignment = _align_unlabelled_segments_with_adjacents( segments, segments_alignment, partitioned_results.skeletons, frame_rate ) # Compile results resolved_results = _ResolvedResults(partitioned_results) for segment, segment_alignment in zip(segments, segments_alignment): if not ma.is_masked(segment_alignment): resolved_results.resolve(segment, segment_alignment) # Filter the final results again by score threshold low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0] resolved_results.mask(low_scores_indices) num_success = resolved_results.num_valid() original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum() logger.info( f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully " f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}" f" or {(float(original_num_success) / len_series * 100):.1f}% of total)" ) if num_success < original_num_success: logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!") return _FinalResults.from_resolved(resolved_results)
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment] self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment] self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
identifier_body
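The resolve() body given as the middle above reduces to one indexing pattern: on a (frames, 2, ...) masked array, [segment][:, alignment] keeps the chosen head/tail solution for each frame of the segment. A small sketch with toy shapes:

import numpy as np
import numpy.ma as ma

partitioned = ma.masked_all((4, 2, 3))            # 4 frames, 2 solutions, 3 angles
partitioned[1:] = np.arange(18).reshape(3, 2, 3)  # frames 1..3 hold data
segment = np.array([1, 2, 3])  # frame indices of one segment
alignment = 1                  # solution chosen for this segment
resolved = partitioned[segment][:, alignment]
print(resolved.shape)          # (3, 3): one solution left per frame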
headtail_resolution.py
""" This module contains the logic to resolve the head-tail orientation of a predicted video time series. """ import logging import numpy as np import numpy.ma as ma from wormpose.pose.distance_metrics import angle_distance, skeleton_distance from wormpose.pose.results_datatypes import ( BaseResults, ShuffledResults, OriginalResults, ) logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) # threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30) # we consider frames to be part of the same segment if they are maximum this amount of seconds apart # (and satisfy the distance threshold) CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2 # discard too small segments less than this amount of seconds MIN_SEGMENT_SIZE_SEC = 0.2 # don't align isolated segments that are more than this amount of seconds apart from aligned segments MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1 def _init_partitioned_series(shuffled_series: np.ndarray): return ma.masked_all_like(shuffled_series) def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int): partitioned_series[frame_index][0] = shuffled_series[frame_index, partition] partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition] class _PartitionedResults(BaseResults): def __init__(self, shuffled_results: ShuffledResults): self.cur_partition = -1 self.partitions = ma.masked_all((len(shuffled_results),), dtype=int) self._shuffled_results = shuffled_results theta = _init_partitioned_series(shuffled_results.theta) skeletons = _init_partitioned_series(shuffled_results.skeletons) scores = _init_partitioned_series(shuffled_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def mask(self, indices):
self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True self.partitions.mask[indices] = True def set_partition(self, frame_index: int, partition: int, new_partition: bool = False): if new_partition: self.cur_partition += 1 _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition) _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition) _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition) self.partitions[frame_index] = self.cur_partition def _get_partition_indices(self, partition_index: int): return np.where(self.partitions == partition_index)[0] def get_segments(self): all_partitions_indexes = np.unique(self.partitions.filled(-1)) return [ self._get_partition_indices(partition_index) for partition_index in all_partitions_indexes if partition_index >= 0 ] class _ResolvedResults(BaseResults): def __init__(self, partitioned_results: _PartitionedResults): self._partitioned_results = partitioned_results theta = _init_unified_series(partitioned_results.theta) skeletons = _init_unified_series(partitioned_results.skeletons) scores = _init_unified_series(partitioned_results.scores) super().__init__(theta=theta, skeletons=skeletons, scores=scores) def resolve(self, segment, segment_alignment): self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment] self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment] self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment] def mask(self, indices): self.theta.mask[indices] = True self.skeletons.mask[indices] = True self.scores.mask[indices] = True def num_valid(self): return np.sum(~self.scores.mask) class _FinalResults(BaseResults): @classmethod def from_resolved(cls, resolved_results: _ResolvedResults): return _FinalResults( theta=resolved_results.theta.filled(np.nan), skeletons=resolved_results.skeletons.filled(np.nan), scores=resolved_results.scores.filled(np.nan), ) @classmethod def from_shuffled(cls, shuffled_results: ShuffledResults): return _FinalResults( theta=np.full_like(shuffled_results.theta[:, 0], np.nan), skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan), scores=np.full_like(shuffled_results.scores[:, 0], np.nan), ) def _make_continuous_partitions( shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float ) -> _PartitionedResults: time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC)) min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC)) partitioned_results = _PartitionedResults(shuffled_results) # discard low score frames early (use the maximum value of both scores for now) good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0] for frame_index in good_score_frames: prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0] # if there is a big gap > time_window we start a new partition, with a random value (0) if np.all(np.any(prev_theta.mask, axis=1)): partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True) # otherwise we look in the time_window close past the closest non nan frame see if we can continue the # partition as long as the values stay continuous else: last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1] dists = [ angle_distance( shuffled_results.theta[frame_index, k, :], prev_theta[last_valid_index], ) for
k in range(2) ] partition = int(np.argmin(dists)) if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD: partitioned_results.set_partition(frame_index=frame_index, partition=partition) # discard short segments for cur_partition_indices in partitioned_results.get_segments(): if len(cur_partition_indices) < min_segment_size: partitioned_results.mask(cur_partition_indices) return partitioned_results def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5): """ Match the head/tail alignment with the results of the classical tracking in each of the segments, if there is enough labelled data in the segment """ segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8) for segment_index, segment in enumerate(segments): segment_skeletons = labelled_skeletons[segment] non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2)) labels_count = np.sum(non_nan_labelled) non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3)) to_compare = np.logical_and(non_nan_labelled, non_masked) similarity_scores = [] for label_skel, partitioned_skeleton in zip( segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare] ): dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton] similarity_scores.append(dists) if len(similarity_scores) > 0: mean_similarity_scores = np.mean(similarity_scores, axis=0) if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled: segments_alignment[segment_index] = np.argmax(mean_similarity_scores) return segments_alignment def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment): # evaluate how far away this segment is from known values score = np.nan segment_offset = np.nan if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]: gap = segments[segment_index][0] - segments[segment_index - 1][-1] score = gap segment_offset = -1 if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]: gap = segments[segment_index + 1][0] - segments[segment_index][-1] if np.isnan(score) or gap < score: score = gap segment_offset = 1 return score, segment_offset def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float): """ Resolve the unaligned segments by comparing with adjacent segments, starting with the segments that have the least frames gap between an adjacent trusted segment Don't align isolated segments which a big gap between trusted segments """ maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC)) # ensure that if no segments have been aligned at all, pick one solution randomly to start if np.all(segments_alignment.mask): logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.") return segments_alignment # fix in priority the segments with known adjacent frames with little gap # until all segments are aligned except the isolated ones (further than maximum_gap_allowed) unaligned = np.where(segments_alignment.mask)[0] while len(unaligned) > 0: # we first pick the best candidate segment to align (there are known frames nearby before or after or both) all_gaps = [ _calculate_smallest_gap_to_adjacent( segment_index=x, segments=segments, segments_alignment=segments_alignment, ) for x in unaligned ] segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0] gap_to_adjacent_segment, adjacent_segment_offset = 
all_gaps[segment_to_fix_index] # abort if only isolated segments are left if gap_to_adjacent_segment > maximum_gap_allowed: break cur_segment_index = unaligned[segment_to_fix_index] cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]] adjacent_segment_index = cur_segment_index + adjacent_segment_offset adjacent_alignment = segments_alignment[adjacent_segment_index] adjacent_segment = segments[adjacent_segment_index] adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment] if adjacent_segment_offset == -1: closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment elif adjacent_segment_offset == 1: closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment else: raise ValueError() dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton] segments_alignment[cur_segment_index] = int(np.argmax(dists)) unaligned = np.where(segments_alignment.mask)[0] return segments_alignment def _init_unified_series(mixed_series): return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype) def resolve_head_tail( shuffled_results: ShuffledResults, original_results: OriginalResults, frame_rate: float, score_threshold, ) -> BaseResults: len_series = len(shuffled_results) # Create continuous segments without jumps partitioned_results = _make_continuous_partitions( score_threshold=score_threshold, frame_rate=frame_rate, shuffled_results=shuffled_results, ) segments = partitioned_results.get_segments() if len(segments) == 0: logger.error( f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}," f" stopping analysis." ) return _FinalResults.from_shuffled(shuffled_results) # Choose each segment global alignment by comparing with labelled data segments_alignment = _align_segments_with_labels( segments, partitioned_results.skeletons, original_results.skeletons ) # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively segments_alignment = _align_unlabelled_segments_with_adjacents( segments, segments_alignment, partitioned_results.skeletons, frame_rate ) # Compile results resolved_results = _ResolvedResults(partitioned_results) for segment, segment_alignment in zip(segments, segments_alignment): if not ma.is_masked(segment_alignment): resolved_results.resolve(segment, segment_alignment) # Filter the final results again by score threshold low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0] resolved_results.mask(low_scores_indices) num_success = resolved_results.num_valid() original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum() logger.info( f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully " f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}" f" or {(float(original_num_success) / len_series * 100):.1f}% of total)" ) if num_success < original_num_success: logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!") return _FinalResults.from_resolved(resolved_results)
random_line_split
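The adjacent-segment pass in the code above ranks unaligned segments by their frame gap to the nearest trusted neighbor, as in _calculate_smallest_gap_to_adjacent. A toy sketch (the segment boundaries and the trusted set are made up):

import numpy as np
import numpy.ma as ma

segments = [np.arange(0, 5), np.arange(9, 12), np.arange(30, 33)]
alignment = ma.masked_all((3,), dtype=np.uint8)
alignment[0] = 1  # only segment 0 is trusted

def smallest_gap(i):
    best = np.nan
    if i - 1 >= 0 and not alignment.mask[i - 1]:
        best = segments[i][0] - segments[i - 1][-1]
    if i + 1 < len(segments) and not alignment.mask[i + 1]:
        gap = segments[i + 1][0] - segments[i][-1]
        if np.isnan(best) or gap < best:
            best = gap
    return best

print(smallest_gap(1))  # 5: starts 5 frames after trusted segment 0 ends
print(smallest_gap(2))  # nan: no trusted neighbor, stays isolated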
game.rs
use std::convert::TryFrom; use hdk::{ utils, entry_definition::ValidatingEntryType, error::{ZomeApiResult, ZomeApiError}, holochain_persistence_api::{ cas::content::{AddressableContent, Address}, }, holochain_json_api::{ error::JsonError, json::JsonString, }, holochain_core_types::{ dna::entry_types::Sharing, validation::EntryValidationData, entry::Entry, link::LinkMatch, } }; use crate::game_move::Move; use crate::GameState; #[derive(Clone, Debug, Serialize, Deserialize, DefaultJson)] pub struct Game { pub player_1: Address, pub player_2: Address, pub created_at: u32, } /*===================================== = DHT Functions = =====================================*/ /// Traverse the linked list rooted at a game to find all the moves pub fn get_moves(game_address: &Address) -> ZomeApiResult<Vec<Move>> { match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() { /* get_links returns a ZomeApiResult<GetLinksResult>. * This will get entries that are linked to the first argument. * Since ZomeApiResult returns Result<T, ZomeApiError> (where T in this case is the GetLinksResult), * you can use the ? sugar to return the ZomeApiError on error, or the T if get_links succeeds. * GetLinksResult has a method implemented called addresses() which returns a vector of Addresses. * into_iter() will iterate through this vector of addresses and move the values from the vector into an Iterator. * next() is a method for iterators wherein it returns the next value of the Iterator (starting at index 0) in Option<Self::Item>. * Since next() returns an Option<Self::Item>, we can use the match operator to cater to all possible values of Option<Self::Item> */ Some(first_move) => { let mut move_addresses = vec![first_move]; let mut more = true; while more { more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() { Some(addr) => { move_addresses.push(addr.clone()); true }, None => { false }, } } /* In this match operator, we first cater to Some(first_move). The name is first_move because * the Game entry is always linked to the first_move made by Player 2. * So we store this first_move in a vector named move_addresses. * Then we create a while loop in order to store all the game_move entries that are linked to the first_move. * While more is true, we get the entries linked to the first_move, then the next move, and so on, until we finish all the linked moves. The way this works is, in the first argument of get_links, * we get the very last element of the move_addresses vector using last() which returns an Option<&T>. * Since we want the address itself wrapped in Option<&T>, we will use unwrap() to get the value of the Address. * In this way, we will always have the last address stored in move_addresses as our first argument in get_links. * Then we do the same thing we did above to move the value from a vector of addresses to an Iterator then get the value with next(). * Then we run the match operator again to store the address in the move_addresses using push() then return true to run the loop again. * Since next() returns None if there is no more value to be retrieved in the Iterator, we return false in None so that the loop ends after * we get all the moves that are linked together.
*/ let moves: Vec<Move> = move_addresses.iter().map(|addr| { let move_entry = hdk::get_entry(addr).unwrap().unwrap(); if let Entry::App(_, move_struct) = move_entry { Move::try_from(move_struct).expect("Entry at address is type other than Move") } else { panic!("Not an app entry!") } }).collect(); /* Now that we have a vector of addresses for all connected moves, we will now try to retrieve the data itself which can * be found in the Addresses we retrieved. First, we create a variable named moves which is a type of Vec<Move>. In this variable, * we will use the iter() method on move_addresses (note that we used iter() instead of into_iter() because we don't want * to move the value from move_addresses but rather have a reference to the addresses found in the move_addresses.) and then * use the map() method provided in Iterator. map() takes a closure and creates an iterator which calls that closure on each element. * the closure will have addr as an argument. The closure creates a variable named move_entry in which we will use the method * get_entry which takes an Address (HashString) type then returns ZomeApiResult<Option<Entry>>. We then unwrap it twice to * retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because the Entry * enum can have different variants and we need to make sure that the entry found at this address is an App variant. If not, * then we throw a panic in the else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method * to try to convert the Entry::App, which we assume to have the Move struct in the second element of the App * variant (here named move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error * saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses, * we use collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move> * since that is the defined type for moves. */ Ok(moves) }, None => { Ok(Vec::new()) } } } pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> { let moves = get_moves(game_address)?; let game = get_game(game_address)?; let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move)); Ok(new_state) /* get_state takes the address of the game as a parameter and returns a ZomeApiResult<GameState>. This is a reducer function. * First we create a variable named moves and call get_moves with the parameter game_address. * Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong. * T in this case is Vec<Move> which will also be the type of the moves variable. Next we create the game variable and call get_game * with the game_address being its argument. get_game also returns a ZomeApiResult with Game being the success value so we * use the ? to get the Game struct if no error occurs. With moves and game holding the values we need, we will now create * a variable named new_state and call iter() on moves to turn it into an Iterator in order for us to call the fold() method on it. * fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element. * The closure returns the value that the accumulator should have for the next iteration.
In this case, the initial value is an empty * GameState created with the initial() function we associated with GameState. Then the accumulator will be named state which will hold the * initial value (empty GameState) we set. new_move will be each Move stored in moves. Now we call the evolve() method we associated * with GameState in state.rs. evolve takes self, a Game struct, and a &Move so we clone game and give it as the first argument and a * reference to each move with new_move (automatically a reference since the element in fold has FnMut implemented). This evolve method will * apply all the Moves in moves to the GameState which will be stored in new_state. Now we can return this as Ok(new_state) */ } pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> { utils::get_as_type(game_address.to_owned()) /* get_as_type loads an entry from the given address in the argument then converts it to a given type wrapped in ZomeApiResult. In this case, * Rust will infer that the type is Game since that is the return value of the get_game function so it will convert the loaded entry from the * given address to ZomeApiResult<Game> */ } /*===== End of DHT Functions ======*/ /*============================================= = Local chain functions = =============================================*/ pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> { local_chain .iter() .filter(|entry| { entry.address() == game_address.to_owned() }) .filter_map(|entry| { if let Entry::App(_, entry_data) = entry { Some(Game::try_from(entry_data.clone()).unwrap()) } else { None } }) .next() .ok_or(ZomeApiError::HashNotFound) /* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>. * Now we call the iter() method on the local_chain so that we can call the filter() method. The filter() method will create an iterator * which uses a closure to determine if an element should be yielded. The closure must return true or false and if the closure returns * true on that element then filter() will return that element. If it's false it simply runs the same closure on the next element. * Here filter's closure checks if the address of each element found in the local_chain is equal to the address of game_address * by getting the address of each element in the local_chain using the address() method provided for the Entry type in hdk. We need to clone * the game_address because we are passing a reference in the parameter and we can't compare a reference to an actual value (not 100% sure, * correct me if I'm wrong). If the address of the entry matches the game_address that is passed in the parameter, then we return that entry. * After getting all elements that have the address of game_address, we implement the filter_map() method which filters then maps. * filter_map() takes a closure as an argument which has to return Option<T>. If the closure returns Some(element) then we return the * element. If the closure returns None then we just skip and try the closure on the next element in local_chain. Inside the closure, * we use the if let to make sure that each element is an Entry::App variant. If not we return None but if it is, then we use the try_from() * method on the entry_data found in the Entry::App and convert it to the Game struct because at this point we are sure that the element * is an Entry::App variant that holds the Game struct as AppEntryValue.
try_from returns Result<Self, Self::Error> so we use unwrap to get * the Self which in this case is Game. Since at this point, we are sure that there is only one match for the game_address provided * in the parameter, we use next() to return the element. Since next() returns an Option<T>, we use the ok_or() method to turn * Option<T> into Result<T, E>, with E here being the ZomeApiError::HashNotFound variant which indicates that the game_address provided in the * parameter did not match any entry in the local_chain. We return ZomeApiError::HashNotFound because ZomeApiResult expects any variant * of the ZomeApiError to be returned as an error value. */ } pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> { Ok(local_chain .iter() .filter_map(|entry| { if let Entry::App(entry_type, entry_data) = entry { if entry_type.to_string() == "move" { Some(Move::try_from(entry_data.clone()).unwrap()) } else { None } } else { None } }) .filter(|game_move| { game_move.game == game_address.to_owned() }) .rev() .collect()) /* This one is similar to get_game_local_chain. It takes the local_chain Entries and the game_address as parameters and returns * a vector of Move wrapped in ZomeApiResult. We first call iter() again then use filter_map() to filter the entries in the local chain * to the Entry::App variant; then if the entry_type (1st element of the App variant) is equal to "move" we return that entry using the try_from * method and wrap it in Some(). Otherwise we return None if there is no Entry that has the entry_type of "move", and return None also if there * is no Entry::App variant in the local chain. After getting all entries with "move" as the entry_type, we need to filter them and only * yield the "move" entries that have the game_address passed in the parameter. That's what the next filter() is for and we check if the game * field of the "move" entry we retrieved from filter_map equals the game_address being passed in the parameter. We then use rev() to reverse * the iteration when we use the collect() method in order to collect them and turn them into Vec<Move>. // To verify: why use rev()? */ } pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> { let moves = get_moves_local_chain(local_chain.clone(), game_address)?;
let new_state = moves.iter().fold(GameState::initial(), move |state, new_move| state.evolve(game.clone(), new_move)); Ok(new_state) /* get_state_local_chain is similar to the get_state function. It takes local_chain and game_address as parameters and returns the GameState. * We first get all the moves associated with the game_address given as a parameter using get_moves_local_chain and store them in the moves * variable. Then we get the game struct found at game_address using get_game_local_chain. We then create new_state and call iter() * on moves then use the fold() method. fold() will take an empty GameState struct and then call the evolve method on that empty GameState (stored in * state) to store all the Moves. The move keyword before the closure means to move ownership of all of the captured * variables into the closure rather than borrow. To Verify: Not really sure which captured variables we are moving here rather than borrowing. */ } /*===== End of Local chain functions ======*/ pub fn definition() -> ValidatingEntryType { entry!( name: "game", description: "Represents an occurrence of a game between several agents", sharing: Sharing::Public, validation_package: || { hdk::ValidationPackageDefinition::Entry }, validation: | validation_data: hdk::EntryValidationData<Game>| { match validation_data { EntryValidationData::Create{entry, validation_data: _} => { let game = entry as Game; if game.player_1 == game.player_2 { return Err("Player 1 and Player 2 must be different agents.".into()) } Ok(()) }, _ => { Err("Cannot modify or delete a game".into()) } } } ) }
let game = get_game_local_chain(local_chain, game_address)?;
random_line_split
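The get_moves function above walks a linked list on the DHT: take the link off the game entry, then keep following links off the latest move until none remain. A Python analogue of that loop; the links dict is a hypothetical stand-in for hdk::get_links:

links = {"game": "move1", "move1": "move2", "move2": "move3"}

def get_move_addresses(game_address):
    addresses = []
    current = links.get(game_address)  # first link off the game entry
    while current is not None:         # plays the role of the `more` flag
        addresses.append(current)
        current = links.get(current)   # link off the latest move, if any
    return addresses

print(get_move_addresses("game"))  # ['move1', 'move2', 'move3']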
game.rs
use std::convert::TryFrom; use hdk::{ utils, entry_definition::ValidatingEntryType, error::{ZomeApiResult, ZomeApiError}, holochain_persistence_api::{ cas::content::{AddressableContent, Address}, }, holochain_json_api::{ error::JsonError, json::JsonString, }, holochain_core_types::{ dna::entry_types::Sharing, validation::EntryValidationData, entry::Entry, link::LinkMatch, } }; use crate::game_move::Move; use crate::GameState; #[derive(Clone, Debug, Serialize, Deserialize, DefaultJson)] pub struct Game { pub player_1: Address, pub player_2: Address, pub created_at: u32, } /*===================================== = DHT Functions = =====================================*/ /// Traverse the linked list rooted at a game to find all the moves pub fn
(game_address: &Address) -> ZomeApiResult<Vec<Move>> { match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() { /* get_links returns a ZomeApiResult<GetLinksResult>. * This will get entries that are linked to the first argument. * Since ZomeApiResult returns Result<T, ZomeApiError> (where T in this case is the GetLinksResult), * you can use the ? sugar to return the ZomeApiError on error, or the T if get_links succeeds. * GetLinksResult has a method implemented called addresses() which returns a vector of Addresses. * into_iter() will iterate through this vector of addresses and move the values from the vector into an Iterator. * next() is a method for iterators wherein it returns the next value of the Iterator (starting at index 0) in Option<Self::Item>. * Since next() returns an Option<Self::Item>, we can use the match operator to cater to all possible values of Option<Self::Item> */ Some(first_move) => { let mut move_addresses = vec![first_move]; let mut more = true; while more { more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() { Some(addr) => { move_addresses.push(addr.clone()); true }, None => { false }, } } /* In this match operator, we first cater to Some(first_move). The name is first_move because * the Game entry is always linked to the first_move made by Player 2. * So we store this first_move in a vector named move_addresses. * Then we create a while loop in order to store all the game_move entries that are linked to the first_move. * While more is true, we get the entries linked to the first_move, then the next move, and so on, until we finish all the linked moves. The way this works is, in the first argument of get_links, * we get the very last element of the move_addresses vector using last() which returns an Option<&T>. * Since we want the address itself wrapped in Option<&T>, we will use unwrap() to get the value of the Address. * In this way, we will always have the last address stored in move_addresses as our first argument in get_links. * Then we do the same thing we did above to move the value from a vector of addresses to an Iterator then get the value with next(). * Then we run the match operator again to store the address in the move_addresses using push() then return true to run the loop again. * Since next() returns None if there is no more value to be retrieved in the Iterator, we return false in None so that the loop ends after * we get all the moves that are linked together. */ let moves: Vec<Move> = move_addresses.iter().map(|addr| { let move_entry = hdk::get_entry(addr).unwrap().unwrap(); if let Entry::App(_, move_struct) = move_entry { Move::try_from(move_struct).expect("Entry at address is type other than Move") } else { panic!("Not an app entry!") } }).collect(); /* Now that we have a vector of addresses for all connected moves, we will now try to retrieve the data itself which can * be found in the Addresses we retrieved. First, we create a variable named moves which is a type of Vec<Move>. In this variable, * we will use the iter() method on move_addresses (note that we used iter() instead of into_iter() because we don't want * to move the value from move_addresses but rather have a reference to the addresses found in the move_addresses.) and then * use the map() method provided in Iterator. map() takes a closure and creates an iterator which calls that closure on each element.
* the closure will have addr as an argument. The closure creates a variable named move_entry in which we will use the method * get_entry which takes an Address (HashString) type then returns ZomeApiResult<Option<Entry>>. We then unwrap it twice to * retrieve the Entry itself. Then we use if let to match the move_entry with an Entry::App variant. This is because the Entry * enum can have different variants and we need to make sure that the entry found at this address is an App variant. If not, * then we throw a panic in the else statement saying that it is not an app entry. Now if it is an app entry, we use the try_from method * to try to convert the Entry::App, which we assume to have the Move struct in the second element of the App * variant (here named move_struct) as the AppEntryValue type, to an actual Move struct. If the try_from fails then we throw an error * saying the Entry at the given address is not a Move type of entry. After we call the closure on all addresses in move_addresses, * we use collect() to turn them into Vec<Move>. collect() would understand that the items should be collected into Vec<Move> * since that is the defined type for moves. */ Ok(moves) }, None => { Ok(Vec::new()) } } } pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> { let moves = get_moves(game_address)?; let game = get_game(game_address)?; let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move)); Ok(new_state) /* get_state takes the address of the game as a parameter and returns a ZomeApiResult<GameState>. This is a reducer function. * First we create a variable named moves and call get_moves with the parameter game_address. * Since we have the ? operator in get_moves(), it will return the value T in Result<T, ZomeApiError> if nothing goes wrong. * T in this case is Vec<Move> which will also be the type of the moves variable. Next we create the game variable and call get_game * with the game_address being its argument. get_game also returns a ZomeApiResult with Game being the success value so we * use the ? to get the Game struct if no error occurs. With moves and game holding the values we need, we will now create * a variable named new_state and call iter() on moves to turn it into an Iterator in order for us to call the fold() method on it. * fold() takes two arguments: an initial value, and a closure with two arguments: an 'accumulator', and an element. * The closure returns the value that the accumulator should have for the next iteration. In this case, the initial value is an empty * GameState created with the initial() function we associated with GameState. Then the accumulator will be named state which will hold the * initial value (empty GameState) we set. new_move will be each Move stored in moves. Now we call the evolve() method we associated * with GameState in state.rs. evolve takes self, a Game struct, and a &Move so we clone game and give it as the first argument and a * reference to each move with new_move (automatically a reference since the element in fold has FnMut implemented). This evolve method will * apply all the Moves in moves to the GameState which will be stored in new_state. Now we can return this as Ok(new_state) */ } pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> { utils::get_as_type(game_address.to_owned()) /* get_as_type loads an entry from the given address in the argument then converts it to a given type wrapped in ZomeApiResult.
In this case, * rust will infer that the type is Game since that is the return value of get_game function so it will convert the loaded entry from the * given address to ZomeApiResult<Game> */ } /*===== End of DHT Functions ======*/ /*============================================= = Local chain functions = =============================================*/ pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> { local_chain .iter() .filter(|entry| { entry.address() == game_address.to_owned() }) .filter_map(|entry| { if let Entry::App(_, entry_data) = entry { Some(Game::try_from(entry_data.clone()).unwrap()) } else { None } }) .next() .ok_or(ZomeApiError::HashNotFound) /* get_game_local_chain() gets all the Entry in the local_chain as well as the address of the game and will return ZomeApiResult<Game>. * now we will call the iter() method on the local_chain so that we can call the filter() method. filter() method will create an iterator * which uses a closure to determine if an element should be yielded. the closure must return true or false and if the closure returns * true on that element then filter() will return that element. if its a false it simply runs the same closure on the nexrt element. * now filter's closure check if the address of the each element found in the local_chain is equal to the address of game_address * by getting the address of each element in the localchain using address() method provided for the Entry type in hdk. we need to clone * the game_address because we are passing a reference in the parameter and we cant compare a reference to an actual value(not 100% sure * correct me if im wrong). If the address of the entry matches the game_address that is passed in the paramater, then we return that entry. * After getting all elements that have the address of game_address, we implement the filter_map() method which filters then maps. * filter_map() takes a closure as an argument which has to return Option<T>. If the closure returns Some(element) then we return the * element. If the closure returns None then we just skip and try the closure on the next element in local_chain. inside the closure, * we use the if let to make sure that each element is an Entry::App variant. If not we return None but if it is, then we use the try_from() * method on the entry_data found in the Entry::App and convert it to the Game struct cos at this point we are sure that the element * is an Entry::App variant that holds the Game struct as AppEntryValue. try_from returns Result<Self, Self::Error> so we use unwrap to get * the Self which in this case is Game. Since at this point, we are sure that there is only one match for the game_address provided * in the parameter, we use the next() to return the element. Since next() returns an Option<T>, we use the ok_or() method to turn * Option<T> to Result<T, E> and E here being the ZomeApiError::HashNotFound variant which indicates that the game_address provided in the * parameter did not match any entry in the local_chain. we return ZomeApiError::HashNotFound because ZomeApiResult expects any variant * of the ZomeApiError to be returned as an error value. 
*/ } pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> { Ok(local_chain .iter() .filter_map(|entry| { if let Entry::App(entry_type, entry_data) = entry { if entry_type.to_string() == "move" { Some(Move::try_from(entry_data.clone()).unwrap()) } else { None } } else { None } }) .filter(|game_move| { game_move.game == game_address.to_owned() }) .rev() .collect()) /* This one is similar to get_game_local_chain. It takes the local_chain Entries and the game_address as the parameter and returns * a vector of Move wrapped in ZomeApiResult. We first call iter() again then use filter_map() to filter the entries in local chain * to Entry::App variant then if the entry_type (1st element of App variant) is equal to "move" then we return that entry using try_from * method and wrap it in Some(). else we return None if there is no Entry that has the entry_type of "move" and return None also if there * is no Entry:App variant in the local chain. After getting all entries with "move" as the entry_type, we need to filter them and only * yield "move" entry that has the game_address passed in the parameter. That's what the next filter() is for and we check if the game * field of the "move" entry we retrieve from filter_map equals to the game_address being passed in the parameter. We then use rev() to reverse * the iteration when we use the collect() method in order to collect them and turn them into Vec<Move>. // To verify:: why use rev()?? */ } pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> { let moves = get_moves_local_chain(local_chain.clone(), game_address)?; let game = get_game_local_chain(local_chain, game_address)?; let new_state = moves.iter().fold(GameState::initial(), move |state, new_move| state.evolve(game.clone(), new_move)); Ok(new_state) /* get_state_local_chain is similar to get_state function. It takes local_chain and game_address as parameters and return the GameState. * we first get all the moves associated with the game_address given as parameter using get_moves_local_chain and store them in moves * variable. Then we get the game struct found at game_address using get_game_local_chain. We then create new_state and call iter() * on moves thne use fold() method. fold() will take an empty GameState sturct and then call evolve method on that empty GameState (sored in * state) to store all the Moves. the move keyword before the closure actually means is to move ownership of all of the captured * variables in the closure rather than borrow. To Verify: Not really sure which captured variables we are moving here rather than borrowing. */ } /*===== End of Local chain functions ======*/ pub fn definition() -> ValidatingEntryType { entry!( name: "game", description: "Represents an occurence of a game between several agents", sharing: Sharing::Public, validation_package: || { hdk::ValidationPackageDefinition::Entry }, validation: | validation_data: hdk::EntryValidationData<Game>| { match validation_data { EntryValidationData::Create{entry, validation_data: _} => { let game = entry as Game; if game.player_1 == game.player_2 { return Err("Player 1 and Player 2 must be different agents.".into()) } Ok(()) }, _ => { Err("Cannot modify or delete a game".into()) } } } ) }
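The fold-based reducer described in the get_state comment is easier to see in isolation. Below is a minimal, self-contained sketch of the same pattern; Counter and its evolve method are hypothetical stand-ins for GameState and GameState::evolve, not part of the source above.

// Minimal sketch of the reducer pattern used by get_state: fold over a list
// of events, starting from an initial state and evolving it one event at a
// time. `Counter` is a hypothetical stand-in for GameState; the i64 elements
// stand in for Moves.
#[derive(Debug)]
struct Counter { total: i64 }

impl Counter {
    fn initial() -> Self { Counter { total: 0 } }
    // evolve consumes the current state and returns the next one,
    // mirroring GameState::evolve(self, game, new_move).
    fn evolve(self, tick: &i64) -> Self { Counter { total: self.total + tick } }
}

fn main() {
    let moves = vec![1i64, 2, 3];
    // The accumulator is the state; each element folds into it in order.
    let state = moves.iter().fold(Counter::initial(), |state, m| state.evolve(m));
    assert_eq!(state.total, 6);
    println!("{:?}", state);
}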
get_moves
identifier_name
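The while-loop traversal in get_moves is an instance of a more general linked-list walk: keep looking up the next node from the last one collected until the lookup comes back empty. A minimal sketch under that framing; next_of and the u32 addresses are hypothetical stand-ins for hdk::get_links and Address.

use std::collections::HashMap;

// Sketch of the traversal in get_moves: keep following "next" links, always
// starting from the last address collected, until the lookup returns None.
fn collect_chain(start: u32, next_of: &HashMap<u32, u32>) -> Vec<u32> {
    let mut addresses = vec![start];
    let mut more = true;
    while more {
        more = match next_of.get(addresses.last().unwrap()) {
            Some(addr) => { addresses.push(*addr); true }
            None => false,
        };
    }
    addresses
}

fn main() {
    // 10 -> 11 -> 12, then the chain ends.
    let links: HashMap<u32, u32> = vec![(10, 11), (11, 12)].into_iter().collect();
    assert_eq!(collect_chain(10, &links), vec![10, 11, 12]);
}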
game.rs
use std::convert::TryFrom;

use hdk::{
    utils,
    entry_definition::ValidatingEntryType,
    error::{ZomeApiResult, ZomeApiError},
    holochain_persistence_api::{
        cas::content::{AddressableContent, Address},
    },
    holochain_json_api::{
        error::JsonError,
        json::JsonString,
    },
    holochain_core_types::{
        dna::entry_types::Sharing,
        validation::EntryValidationData,
        entry::Entry,
        link::LinkMatch,
    }
};

use crate::game_move::Move;
use crate::GameState;

#[derive(Clone, Debug, Serialize, Deserialize, DefaultJson)]
pub struct Game {
    pub player_1: Address,
    pub player_2: Address,
    pub created_at: u32,
}

/*=====================================
=            DHT Functions            =
=====================================*/

/// Traverse the linked list rooted at a game to find all the moves
pub fn get_moves(game_address: &Address) -> ZomeApiResult<Vec<Move>> {
    match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
        /* get_links returns ZomeApiResult<GetLinksResult> and fetches the entries linked to its first argument.
         * Since ZomeApiResult is Result<T, ZomeApiError> (where T in this case is GetLinksResult), the ? operator
         * returns early with the ZomeApiError on failure and yields the T on success.
         * GetLinksResult has a method called addresses() which returns a vector of Addresses.
         * into_iter() moves the values out of that vector into an Iterator, and next() returns the iterator's
         * next value (starting at index 0) as Option<Self::Item>.
         * Because next() returns an Option<Self::Item>, we can use a match expression to handle both variants. */
        Some(first_move) => {
            let mut move_addresses = vec![first_move];
            let mut more = true;
            while more {
                more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
                    Some(addr) => { move_addresses.push(addr.clone()); true },
                    None => { false },
                }
            }
            /* We handle Some(first_move) first. The name is first_move because the Game entry is always linked
             * to the first move, made by Player 2. We store that address in a vector named move_addresses, then
             * loop to collect every game_move entry linked from it. On each iteration the first argument to
             * get_links is the very last element of move_addresses: last() returns Option<&T>, and since we want
             * the Address wrapped inside, we unwrap() it. We again move the resulting addresses into an iterator
             * and take its first value with next(). Some(addr) means another linked move exists, so we push() it
             * and return true to run the loop again; None means there is nothing left to retrieve, so we return
             * false and the loop ends once all the linked moves have been collected. */
            let moves: Vec<Move> = move_addresses.iter().map(|addr| {
                let move_entry = hdk::get_entry(addr).unwrap().unwrap();
                if let Entry::App(_, move_struct) = move_entry {
                    Move::try_from(move_struct).expect("Entry at address is type other than Move")
                } else {
                    panic!("Not an app entry!")
                }
            }).collect();
            /* Now that we have the addresses of all connected moves, we retrieve the data they point to. We call
             * iter() on move_addresses (iter() rather than into_iter() because we don't want to move the values
             * out of move_addresses, only to reference them) and then map(), which takes a closure and creates an
             * iterator that calls that closure on each element. Inside the closure, get_entry takes an Address
             * (a HashString) and returns ZomeApiResult<Option<Entry>>, so we unwrap twice to reach the Entry
             * itself. The if let checks that the entry is the Entry::App variant, since the Entry enum has several
             * variants; anything else panics with "Not an app entry!". For an app entry we use try_from to convert
             * the AppEntryValue (bound here as move_struct) into an actual Move struct, and expect() raises an
             * error if the entry at that address is not a Move. After the closure has run on every address,
             * collect() gathers the results into Vec<Move>, which it infers from the declared type of moves. */
            Ok(moves)
        },
        None => {
            Ok(Vec::new())
        }
    }
}

pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves(game_address)?;
    let game = get_game(game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state takes the address of the game as a parameter and returns ZomeApiResult<GameState>. This is a
     * reducer function. First we call get_moves with game_address; because of the ? operator, moves holds the
     * Vec<Move> success value if nothing goes wrong. Likewise game holds the Game struct returned by get_game.
     * We then call iter() on moves and fold() over the iterator. fold() takes two arguments: an initial value,
     * and a closure with two arguments of its own, an accumulator and an element; the closure returns the value
     * the accumulator should have on the next iteration. Here the initial value is an empty GameState created
     * with the initial() function we associated with GameState, the accumulator is named state, and new_move is
     * each Move stored in moves. evolve(), which we associated with GameState in state.rs, takes self, a Game,
     * and a &Move, so we pass a clone of game as the first argument and new_move (already a reference) as the
     * second. evolve applies every Move in moves to the GameState, and we return the result as Ok(new_state). */
}

pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
    utils::get_as_type(game_address.to_owned())
    /* get_as_type loads the entry at the given address and converts it to a target type wrapped in
     * ZomeApiResult. Rust infers that the type is Game from get_game's return type, so the loaded entry is
     * converted to ZomeApiResult<Game>. */
}

/*===== End of DHT Functions ======*/

/*=============================================
=            Local chain functions            =
=============================================*/

pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
    local_chain
        .iter()
        .filter(|entry| {
            entry.address() == game_address.to_owned()
        })
        .filter_map(|entry| {
            if let Entry::App(_, entry_data) = entry
            else {
                None
            }
        })
        .next()
        .ok_or(ZomeApiError::HashNotFound)
    /* get_game_local_chain takes every Entry in the local chain plus the address of the game and returns
     * ZomeApiResult<Game>. We call iter() on local_chain so that we can use filter(), which creates an iterator
     * whose closure decides whether an element should be yielded: if the closure returns true the element is
     * yielded, otherwise the same closure simply runs on the next element. The closure compares each entry's
     * address, obtained with the address() method the hdk provides for Entry, against game_address; we call
     * to_owned() on game_address because entry.address() returns an owned Address and the comparison needs
     * matching types on both sides. We then apply filter_map(), which filters and maps in one pass: its closure
     * must return Option<T>, where Some(element) is yielded and None is skipped. Inside it, if let checks that
     * the element is the Entry::App variant; if it is, we use try_from() to convert the entry_data into a Game
     * struct, because at this point we know the App variant holds a Game as its AppEntryValue. try_from returns
     * Result<Self, Self::Error>, so we unwrap to get the Game. Since at most one entry matches the given
     * game_address, next() returns that element; ok_or() then turns the Option<T> into a Result<T, E>, with E
     * being ZomeApiError::HashNotFound to indicate that no entry in the local chain matched the address.
     * ZomeApiResult expects a ZomeApiError variant as its error value. */
}

pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> {
    Ok(local_chain
        .iter()
        .filter_map(|entry| {
            if let Entry::App(entry_type, entry_data) = entry {
                if entry_type.to_string() == "move" {
                    Some(Move::try_from(entry_data.clone()).unwrap())
                } else {
                    None
                }
            } else {
                None
            }
        })
        .filter(|game_move| {
            game_move.game == game_address.to_owned()
        })
        .rev()
        .collect())
    /* This one is similar to get_game_local_chain. It takes the local chain Entries and the game_address as
     * parameters and returns a vector of Moves wrapped in ZomeApiResult. We call iter() again, then use
     * filter_map() to keep only Entry::App entries whose entry_type (the first element of the App variant)
     * equals "move", converting each with try_from and wrapping it in Some(); anything else maps to None and is
     * skipped. The next filter() keeps only the "move" entries whose game field equals the game_address passed
     * in the parameter. Finally rev() reverses the iteration before collect() turns the results into Vec<Move>:
     * the local chain iterates from the newest entry backwards, so reversing yields the moves in the order they
     * were committed (worth verifying against the hdk's iteration order). */
}

pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves_local_chain(local_chain.clone(), game_address)?;
    let game = get_game_local_chain(local_chain, game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), move |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state_local_chain is similar to get_state. It takes local_chain and game_address as parameters and
     * returns the GameState. We first gather all the moves associated with the given game_address using
     * get_moves_local_chain, then the Game struct found at that address using get_game_local_chain. We then
     * call iter() on moves and use fold(), starting from an empty GameState and calling evolve on it to apply
     * each Move. The move keyword before the closure transfers ownership of the captured variables into the
     * closure instead of borrowing them; here the only captured variable is game (state and new_move are
     * closure parameters, not captures), and the closure clones it on each call. */
}

/*===== End of Local chain functions ======*/

pub fn definition() -> ValidatingEntryType {
    entry!(
        name: "game",
        description: "Represents an occurrence of a game between several agents",
        sharing: Sharing::Public,
        validation_package: || {
            hdk::ValidationPackageDefinition::Entry
        },
        validation: |validation_data: hdk::EntryValidationData<Game>| {
            match validation_data {
                EntryValidationData::Create{entry, validation_data: _} => {
                    let game = entry; // entry is already typed as Game; no cast is needed
                    if game.player_1 == game.player_2 {
                        return Err("Player 1 and Player 2 must be different agents.".into())
                    }
                    Ok(())
                },
                _ => {
                    Err("Cannot modify or delete a game".into())
                }
            }
        }
    )
}
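The if let Entry::App checks above are one instance of filtering an iterator down to a single enum variant. Below is a minimal sketch of that filter_map pattern; this toy Entry enum is a hypothetical stand-in for the hdk's Entry type, not the real one.

// Sketch of the filter_map pattern used on the local chain: keep only the
// enum variant we care about and map it to its payload in one pass.
#[derive(Debug)]
enum Entry {
    App(String, String), // (entry_type, entry_data), as in the hdk's App variant
    Dna,
}

fn main() {
    let chain = vec![
        Entry::Dna,
        Entry::App("move".into(), "first".into()),
        Entry::App("game".into(), "ignored".into()),
        Entry::App("move".into(), "second".into()),
    ];
    // Some(data) is yielded; None is silently skipped, exactly as described above.
    let moves: Vec<String> = chain
        .iter()
        .filter_map(|entry| match entry {
            Entry::App(entry_type, data) if entry_type.as_str() == "move" => Some(data.clone()),
            _ => None,
        })
        .collect();
    assert_eq!(moves, vec!["first".to_string(), "second".to_string()]);
}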
{ Some(Game::try_from(entry_data.clone()).unwrap()) }
conditional_block
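The try_from calls above come from the standard TryFrom trait: a fallible conversion that returns Result<Self, Self::Error>, which the caller can unwrap(), expect(), or propagate. A minimal sketch with hypothetical RawEntry and Move types (not the hdk's):

use std::convert::TryFrom;

// Sketch of the TryFrom conversions used above: parsing a raw entry payload
// into a typed struct can fail, so the conversion returns a Result.
struct RawEntry(String);

#[derive(Debug, PartialEq)]
struct Move { position: usize }

impl TryFrom<RawEntry> for Move {
    type Error = String;
    fn try_from(raw: RawEntry) -> Result<Self, Self::Error> {
        raw.0.parse::<usize>()
            .map(|position| Move { position })
            .map_err(|_| format!("entry is not a Move: {}", raw.0))
    }
}

fn main() {
    assert_eq!(Move::try_from(RawEntry("7".into())), Ok(Move { position: 7 }));
    assert!(Move::try_from(RawEntry("oops".into())).is_err());
}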
game.rs
use std::convert::TryFrom; use hdk::{ utils, entry_definition::ValidatingEntryType, error::{ZomeApiResult, ZomeApiError}, holochain_persistence_api::{ cas::content::{AddressableContent, Address}, }, holochain_json_api::{ error::JsonError, json::JsonString, }, holochain_core_types::{ dna::entry_types::Sharing, validation::EntryValidationData, entry::Entry, link::LinkMatch, } }; use crate::game_move::Move; use crate::GameState; #[derive(Clone, Debug, Serialize, Deserialize, DefaultJson)] pub struct Game { pub player_1: Address, pub player_2: Address, pub created_at: u32, } /*===================================== = DHT Functions = =====================================*/ /// Traverse the linked list rooted at a game to find all the moves pub fn get_moves(game_address: &Address) -> ZomeApiResult<Vec<Move>>
pub fn get_state(game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves(game_address)?;
    let game = get_game(game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state takes the address of the game as a parameter and returns ZomeApiResult<GameState>. This is a
     * reducer function. First we call get_moves with game_address; because of the ? operator, moves holds the
     * Vec<Move> success value if nothing goes wrong. Likewise game holds the Game struct returned by get_game.
     * We then call iter() on moves and fold() over the iterator. fold() takes two arguments: an initial value,
     * and a closure with two arguments of its own, an accumulator and an element; the closure returns the value
     * the accumulator should have on the next iteration. Here the initial value is an empty GameState created
     * with the initial() function we associated with GameState, the accumulator is named state, and new_move is
     * each Move stored in moves. evolve(), which we associated with GameState in state.rs, takes self, a Game,
     * and a &Move, so we pass a clone of game as the first argument and new_move (already a reference) as the
     * second. evolve applies every Move in moves to the GameState, and we return the result as Ok(new_state). */
}

pub fn get_game(game_address: &Address) -> ZomeApiResult<Game> {
    utils::get_as_type(game_address.to_owned())
    /* get_as_type loads the entry at the given address and converts it to a target type wrapped in
     * ZomeApiResult. Rust infers that the type is Game from get_game's return type, so the loaded entry is
     * converted to ZomeApiResult<Game>. */
}

/*===== End of DHT Functions ======*/

/*=============================================
=            Local chain functions            =
=============================================*/

pub fn get_game_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Game> {
    local_chain
        .iter()
        .filter(|entry| {
            entry.address() == game_address.to_owned()
        })
        .filter_map(|entry| {
            if let Entry::App(_, entry_data) = entry {
                Some(Game::try_from(entry_data.clone()).unwrap())
            } else {
                None
            }
        })
        .next()
        .ok_or(ZomeApiError::HashNotFound)
    /* get_game_local_chain takes every Entry in the local chain plus the address of the game and returns
     * ZomeApiResult<Game>. We call iter() on local_chain so that we can use filter(), which creates an iterator
     * whose closure decides whether an element should be yielded: if the closure returns true the element is
     * yielded, otherwise the same closure simply runs on the next element. The closure compares each entry's
     * address, obtained with the address() method the hdk provides for Entry, against game_address; we call
     * to_owned() on game_address because entry.address() returns an owned Address and the comparison needs
     * matching types on both sides. We then apply filter_map(), which filters and maps in one pass: its closure
     * must return Option<T>, where Some(element) is yielded and None is skipped. Inside it, if let checks that
     * the element is the Entry::App variant; if it is, we use try_from() to convert the entry_data into a Game
     * struct, because at this point we know the App variant holds a Game as its AppEntryValue. try_from returns
     * Result<Self, Self::Error>, so we unwrap to get the Game. Since at most one entry matches the given
     * game_address, next() returns that element; ok_or() then turns the Option<T> into a Result<T, E>, with E
     * being ZomeApiError::HashNotFound to indicate that no entry in the local chain matched the address.
     * ZomeApiResult expects a ZomeApiError variant as its error value. */
}

pub fn get_moves_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<Vec<Move>> {
    Ok(local_chain
        .iter()
        .filter_map(|entry| {
            if let Entry::App(entry_type, entry_data) = entry {
                if entry_type.to_string() == "move" {
                    Some(Move::try_from(entry_data.clone()).unwrap())
                } else {
                    None
                }
            } else {
                None
            }
        })
        .filter(|game_move| {
            game_move.game == game_address.to_owned()
        })
        .rev()
        .collect())
    /* This one is similar to get_game_local_chain. It takes the local chain Entries and the game_address as
     * parameters and returns a vector of Moves wrapped in ZomeApiResult. We call iter() again, then use
     * filter_map() to keep only Entry::App entries whose entry_type (the first element of the App variant)
     * equals "move", converting each with try_from and wrapping it in Some(); anything else maps to None and is
     * skipped. The next filter() keeps only the "move" entries whose game field equals the game_address passed
     * in the parameter. Finally rev() reverses the iteration before collect() turns the results into Vec<Move>:
     * the local chain iterates from the newest entry backwards, so reversing yields the moves in the order they
     * were committed (worth verifying against the hdk's iteration order). */
}

pub fn get_state_local_chain(local_chain: Vec<Entry>, game_address: &Address) -> ZomeApiResult<GameState> {
    let moves = get_moves_local_chain(local_chain.clone(), game_address)?;
    let game = get_game_local_chain(local_chain, game_address)?;
    let new_state = moves.iter().fold(GameState::initial(), move |state, new_move| state.evolve(game.clone(), new_move));
    Ok(new_state)
    /* get_state_local_chain is similar to get_state. It takes local_chain and game_address as parameters and
     * returns the GameState. We first gather all the moves associated with the given game_address using
     * get_moves_local_chain, then the Game struct found at that address using get_game_local_chain. We then
     * call iter() on moves and use fold(), starting from an empty GameState and calling evolve on it to apply
     * each Move. The move keyword before the closure transfers ownership of the captured variables into the
     * closure instead of borrowing them; here the only captured variable is game (state and new_move are
     * closure parameters, not captures), and the closure clones it on each call. */
}

/*===== End of Local chain functions ======*/

pub fn definition() -> ValidatingEntryType {
    entry!(
        name: "game",
        description: "Represents an occurrence of a game between several agents",
        sharing: Sharing::Public,
        validation_package: || {
            hdk::ValidationPackageDefinition::Entry
        },
        validation: |validation_data: hdk::EntryValidationData<Game>| {
            match validation_data {
                EntryValidationData::Create{entry, validation_data: _} => {
                    let game = entry; // entry is already typed as Game; no cast is needed
                    if game.player_1 == game.player_2 {
                        return Err("Player 1 and Player 2 must be different agents.".into())
                    }
                    Ok(())
                },
                _ => {
                    Err("Cannot modify or delete a game".into())
                }
            }
        }
    )
}
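The open question in the get_state_local_chain comment about what move captures can be settled with a small experiment: move transfers ownership of every captured variable into the closure, and here the only capture is game. A sketch, using a hypothetical Game type rather than the real struct above:

// Sketch of `move` closure capture as used in get_state_local_chain:
// `state` and `m` are closure parameters, not captures, so `move` only
// affects `game`, which is owned by the closure and cloned per call.
#[derive(Clone, Debug)]
struct Game { id: u8 }

fn main() {
    let game = Game { id: 1 };
    let moves = vec![1u8, 2, 3];
    let total = moves.iter().fold(0u32, move |state, m| {
        // Mirrors state.evolve(game.clone(), new_move).
        let g = game.clone();
        state + u32::from(*m) + u32::from(g.id)
    });
    assert_eq!(total, 9);
    // `game` was moved into the closure, so using it here would not compile.
}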
{
    match hdk::get_links(game_address, LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
        /* get_links returns ZomeApiResult<GetLinksResult> and fetches the entries linked to its first argument.
         * Since ZomeApiResult is Result<T, ZomeApiError> (where T in this case is GetLinksResult), the ? operator
         * returns early with the ZomeApiError on failure and yields the T on success.
         * GetLinksResult has a method called addresses() which returns a vector of Addresses.
         * into_iter() moves the values out of that vector into an Iterator, and next() returns the iterator's
         * next value (starting at index 0) as Option<Self::Item>.
         * Because next() returns an Option<Self::Item>, we can use a match expression to handle both variants. */
        Some(first_move) => {
            let mut move_addresses = vec![first_move];
            let mut more = true;
            while more {
                more = match hdk::get_links(move_addresses.last().unwrap(), LinkMatch::Any, LinkMatch::Any)?.addresses().into_iter().next() {
                    Some(addr) => { move_addresses.push(addr.clone()); true },
                    None => { false },
                }
            }
            /* We handle Some(first_move) first. The name is first_move because the Game entry is always linked
             * to the first move, made by Player 2. We store that address in a vector named move_addresses, then
             * loop to collect every game_move entry linked from it. On each iteration the first argument to
             * get_links is the very last element of move_addresses: last() returns Option<&T>, and since we want
             * the Address wrapped inside, we unwrap() it. We again move the resulting addresses into an iterator
             * and take its first value with next(). Some(addr) means another linked move exists, so we push() it
             * and return true to run the loop again; None means there is nothing left to retrieve, so we return
             * false and the loop ends once all the linked moves have been collected. */
            let moves: Vec<Move> = move_addresses.iter().map(|addr| {
                let move_entry = hdk::get_entry(addr).unwrap().unwrap();
                if let Entry::App(_, move_struct) = move_entry {
                    Move::try_from(move_struct).expect("Entry at address is type other than Move")
                } else {
                    panic!("Not an app entry!")
                }
            }).collect();
            /* Now that we have the addresses of all connected moves, we retrieve the data they point to. We call
             * iter() on move_addresses (iter() rather than into_iter() because we don't want to move the values
             * out of move_addresses, only to reference them) and then map(), which takes a closure and creates an
             * iterator that calls that closure on each element. Inside the closure, get_entry takes an Address
             * (a HashString) and returns ZomeApiResult<Option<Entry>>, so we unwrap twice to reach the Entry
             * itself. The if let checks that the entry is the Entry::App variant, since the Entry enum has several
             * variants; anything else panics with "Not an app entry!". For an app entry we use try_from to convert
             * the AppEntryValue (bound here as move_struct) into an actual Move struct, and expect() raises an
             * error if the entry at that address is not a Move. After the closure has run on every address,
             * collect() gathers the results into Vec<Move>, which it infers from the declared type of moves. */
            Ok(moves)
        },
        None => {
            Ok(Vec::new())
        }
    }
}
identifier_body
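The comments above lean heavily on the ? operator for ZomeApiResult. Below is a minimal sketch of how ? propagates errors; fetch_links is a hypothetical stand-in for hdk::get_links, with String standing in for ZomeApiError.

// Sketch of the ? operator described above: on Err it returns early from the
// enclosing function, on Ok it unwraps the success value in place.
fn fetch_links(addr: u32) -> Result<Vec<u32>, String> {
    if addr == 0 { Err("unknown address".into()) } else { Ok(vec![addr + 1]) }
}

fn first_link(addr: u32) -> Result<Option<u32>, String> {
    // On failure, ? propagates the Err to our caller; on success we get the Vec.
    let links = fetch_links(addr)?;
    Ok(links.into_iter().next())
}

fn main() {
    assert_eq!(first_link(10), Ok(Some(11)));
    assert!(first_link(0).is_err());
}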
server_rpc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/server/server_rpc.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import threading from king_phisher import geoip from king_phisher import version from king_phisher.server.database import manager as db_manager from king_phisher.server.database import models as db_models VIEW_ROW_COUNT = 50 """The default number of rows to return when one of the /view methods are called.""" DATABASE_TABLES = db_models.DATABASE_TABLES DATABASE_TABLE_OBJECTS = db_models.DATABASE_TABLE_OBJECTS class KingPhisherRequestHandlerRPC(object): """ This superclass of :py:class:`.KingPhisherRequestHandler` maintains all of the RPC call back functions. 
:RPC API: :ref:`rpc-api-label` """ def install_handlers(self): super(KingPhisherRequestHandlerRPC, self).install_handlers() self.rpc_handler_map['^/ping$'] = self.rpc_ping self.rpc_handler_map['^/shutdown$'] = self.rpc_shutdown self.rpc_handler_map['^/version$'] = self.rpc_version self.rpc_handler_map['^/geoip/lookup$'] = self.rpc_geoip_lookup self.rpc_handler_map['^/geoip/lookup/multi$'] = self.rpc_geoip_lookup_multi self.rpc_handler_map['^/client/initialize$'] = self.rpc_client_initialize self.rpc_handler_map['^/config/get$'] = self.rpc_config_get self.rpc_handler_map['^/config/set$'] = self.rpc_config_set self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete for table_name in DATABASE_TABLES.keys(): self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a campaign_id field for table_name in db_models.get_tables_with_column_id('campaign_id'):
# Tables with a message_id field for table_name in db_models.get_tables_with_column_id('message_id'): self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows def rpc_ping(self): """ An RPC method that can be used by clients to assert the status and responsiveness of this server. :return: This method always returns True. :rtype: bool """ return True def rpc_client_initialize(self): """ Initialize any client information necessary. :return: This method always returns True. :rtype: bool """ username = self.basic_auth_user if not username: return True session = db_manager.Session() if not db_manager.get_row_by_id(session, db_models.User, username): user = db_models.User(id=username) session.add(user) session.commit() session.close() return True def rpc_shutdown(self): """ This method can be used to shut down the server. This function will return, however no subsequent requests will be processed. """ shutdown_thread = threading.Thread(target=self.server.shutdown) shutdown_thread.start() return def rpc_version(self): """ Get the version information of the server. This returns a dictionary with keys of version, version_info and rpc_api_version. These values are provided for the client to determine compatibility. :return: A dictionary with version information. :rtype: dict """ vinfo = {'version': version.version, 'version_info': version.version_info._asdict()} vinfo['rpc_api_version'] = version.rpc_api_version return vinfo def rpc_config_get(self, option_name): """ Retrieve a value from the server's configuration. :param str option_name: The name of the configuration option. :return: The option's value. """ if isinstance(option_name, (list, tuple)): option_names = option_name option_values = {} for option_name in option_names: if self.config.has_option(option_name): option_values[option_name] = self.config.get(option_name) return option_values elif self.config.has_option(option_name): return self.config.get(option_name) return def rpc_config_set(self, options): """ Set options in the server's configuration. Any changes to the server's configuration are not written to disk. :param dict options: A dictionary of option names and values """ for option_name, option_value in options.items(): self.config.set(option_name, option_value) return def rpc_campaign_new(self, name): """ Create a new King Phisher campaign and initialize the database information. :param str name: The new campaign's name. :return: The ID of the new campaign. :rtype: int """ session = db_manager.Session() campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user) session.add(campaign) session.commit() return campaign.id def rpc_campaign_alerts_is_subscribed(self, campaign_id): """ Check if the user is subscribed to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. :return: The alert subscription status. :rtype: bool """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) result = query.count() session.close() return result def rpc_campaign_alerts_subscribe(self, campaign_id): """ Subscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. 
""" username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) if query.count() == 0: subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username) session.add(subscription) session.commit() session.close() return def rpc_campaign_alerts_unsubscribe(self, campaign_id): """ Unsubscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) subscription = query.first() if subscription: session.delete(subscription) session.commit() session.close() return def rpc_campaign_landing_page_new(self, campaign_id, hostname, page): """ Add a landing page for the specified campaign. Landing pages refer to resources that when visited by a user should cause the visit counter to be incremented. :param int campaign_id: The ID of the campaign. :param str hostname: The VHOST for the request. :param str page: The request resource. """ page = page.lstrip('/') session = db_manager.Session() query = session.query(db_models.LandingPage) query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page) if query.count() == 0: landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page) session.add(landing_page) session.commit() session.close() return def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name): """ Record a message that has been sent as part of a campaign. These details can be retrieved later for value substitution in template pages. :param int campaign_id: The ID of the campaign. :param str email_id: The message id of the sent email. :param str target_email: The email address that the message was sent to. :param str company_name: The company name value for the message. :param str first_name: The first name of the message's recipient. :param str last_name: The last name of the message's recipient. """ session = db_manager.Session() message = db_models.Message() message.id = email_id message.campaign_id = campaign_id message.target_email = target_email message.company_name = company_name message.first_name = first_name message.last_name = last_name session.add(message) session.commit() session.close() return def rpc_campaign_delete(self, campaign_id): """ Remove a campaign from the database and delete all associated information with it. .. warning:: This action can not be reversed and there is no confirmation before it takes place. """ session = db_manager.Session() session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)) session.commit() session.close() return def rpc_database_count_rows(self, *args): """ Get a count of the rows in the specified table where the search criteria matches. :return: The number of matching rows. :rtype: int """ args = list(args) fields = self.path.split('/')[1:-2] assert len(fields) == len(args) table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) result = query.count() session.close() return result def rpc_database_get_rows(self, *args): """ Retrieve the rows from the specified table where the search criteria matches. 
		:return: A dictionary with columns and rows keys.
		:rtype: dict
		"""
		args = list(args)
		offset = 0
		fields = self.path.split('/')[1:-2]
		if len(args) == (len(fields) + 1):
			offset = (args.pop() * VIEW_ROW_COUNT)
		assert len(fields) == len(args)
		table_name = self.path.split('/')[-2]
		table = DATABASE_TABLE_OBJECTS.get(table_name)
		assert table

		# it's critical that the columns are in the order that the client is expecting
		columns = DATABASE_TABLES[table_name]
		rows = []
		session = db_manager.Session()
		query = session.query(table)
		query = query.filter_by(**dict(zip((f + '_id' for f in fields), args)))
		for row in query[offset:offset + VIEW_ROW_COUNT]:
			rows.append([getattr(row, c) for c in columns])
		session.close()
		if not len(rows):
			return None
		return {'columns': columns, 'rows': rows}

	def rpc_database_delete_row_by_id(self, row_id):
		"""
		Delete a row from a table with the specified value in the id column.

		:param row_id: The id value.
		"""
		table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2])
		assert table
		session = db_manager.Session()
		try:
			session.delete(db_manager.get_row_by_id(session, table, row_id))
			session.commit()
		finally:
			session.close()
		return

	def rpc_database_delete_rows_by_id(self, row_ids):
		"""
		Delete multiple rows from a table with the specified values in the id
		column. If a row id specified in *row_ids* does not exist, then it will
		be skipped and no error will be thrown.

		:param list row_ids: The row ids to delete.
		:return: The row ids that were deleted.
		:rtype: list
		"""
		table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3])
		assert table
		deleted_rows = []
		session = db_manager.Session()
		try:
			for row_id in row_ids:
				row = db_manager.get_row_by_id(session, table, row_id)
				if not row:
					continue
				session.delete(row)
				deleted_rows.append(row_id)
			session.commit()
		finally:
			session.close()
		return deleted_rows

	def rpc_database_get_row_by_id(self, row_id):
		"""
		Retrieve a row from a given table with the specified value in the
		id column.

		:param row_id: The id value.
		:return: The specified row data.
		:rtype: dict
		"""
		table_name = self.path.split('/')[-2]
		table = DATABASE_TABLE_OBJECTS.get(table_name)
		assert table
		columns = DATABASE_TABLES[table_name]
		session = db_manager.Session()
		row = db_manager.get_row_by_id(session, table, row_id)
		if row:
			row = dict(zip(columns, (getattr(row, c) for c in columns)))
		session.close()
		return row

	def rpc_database_insert_row(self, keys, values):
		"""
		Insert a new row into the specified table.

		:param tuple keys: The column names of *values*.
		:param tuple values: The values to be inserted in the row.
		"""
		if not isinstance(keys, (list, tuple)):
			keys = (keys,)
		if not isinstance(values, (list, tuple)):
			values = (values,)
		assert len(keys) == len(values)
		table_name = self.path.split('/')[-2]
		for key, value in zip(keys, values):
			assert key in DATABASE_TABLES[table_name]
		table = DATABASE_TABLE_OBJECTS.get(table_name)
		assert table
		session = db_manager.Session()
		row = table()
		for key, value in zip(keys, values):
			setattr(row, key, value)
		session.add(row)
		# commit before closing, otherwise the pending insert is discarded
		session.commit()
		session.close()
		return

	def rpc_database_set_row_value(self, row_id, keys, values):
		"""
		Set values for a row in the specified table with an id of *row_id*.

		:param tuple keys: The column names of *values*.
		:param tuple values: The values to be updated in the row.
""" if not isinstance(keys, (list, tuple)): keys = (keys,) if not isinstance(values, (list, tuple)): values = (values,) assert len(keys) == len(values) table_name = self.path.split('/')[-2] for key, value in zip(keys, values): assert key in DATABASE_TABLES[table_name] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if not row: session.close() assert row for key, value in zip(keys, values): setattr(row, key, value) session.commit() session.close() return def rpc_geoip_lookup(self, ip, lang=None): """ Look up an IP address in the servers GeoIP database. If the IP address can not be found in the database, None will be returned. :param str ip: The IP address to look up. :param str lang: The language to prefer for regional names. :return: The geographic information for the specified IP address. :rtype: dict """ try: result = geoip.lookup(ip, lang=lang) except geoip.AddressNotFoundError: result = None return result def rpc_geoip_lookup_multi(self, ips, lang=None): """ Look up multiple IP addresses in the servers GeoIP database. Each IP address that can not be found in the database will have its result set to None. :param list ips: The list of IP addresses to look up. :param str lang: The language to prefer for regional names. :return: A dictionary containing the results keyed by the specified IP addresses. :rtype: dict """ results = {} for ip in ips: try: result = geoip.lookup(ip, lang=lang) except geoip.AddressNotFoundError: result = None results[ip] = result return results
self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows
conditional_block
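install_handlers above fills a map from URI patterns to bound methods and registers per-table routes in loops. The same dispatch-table idea is sketched here in Rust, to match the other examples in this file, with plain string keys standing in for the regex patterns (all names are hypothetical, not part of the King Phisher API):

use std::collections::HashMap;

// Sketch of the handler-map dispatch used by install_handlers: route keys
// map to handler functions, and per-table routes are registered in a loop.
type Handler = fn(&str) -> String;

fn ping(_path: &str) -> String { "pong".into() }
fn count_rows(path: &str) -> String { format!("counting rows for {}", path) }

fn main() {
    let mut handlers: HashMap<String, Handler> = HashMap::new();
    handlers.insert("/ping".into(), ping);
    // Register the same handler for every table, as the for-loops above do.
    for table in ["messages", "visits"] {
        handlers.insert(format!("/{}/count", table), count_rows);
    }
    let path = "/visits/count";
    if let Some(handler) = handlers.get(path) {
        println!("{}", handler(path));
    }
}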
server_rpc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/server/server_rpc.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import threading from king_phisher import geoip from king_phisher import version from king_phisher.server.database import manager as db_manager from king_phisher.server.database import models as db_models VIEW_ROW_COUNT = 50 """The default number of rows to return when one of the /view methods are called."""
class KingPhisherRequestHandlerRPC(object): """ This superclass of :py:class:`.KingPhisherRequestHandler` maintains all of the RPC call back functions. :RPC API: :ref:`rpc-api-label` """ def install_handlers(self): super(KingPhisherRequestHandlerRPC, self).install_handlers() self.rpc_handler_map['^/ping$'] = self.rpc_ping self.rpc_handler_map['^/shutdown$'] = self.rpc_shutdown self.rpc_handler_map['^/version$'] = self.rpc_version self.rpc_handler_map['^/geoip/lookup$'] = self.rpc_geoip_lookup self.rpc_handler_map['^/geoip/lookup/multi$'] = self.rpc_geoip_lookup_multi self.rpc_handler_map['^/client/initialize$'] = self.rpc_client_initialize self.rpc_handler_map['^/config/get$'] = self.rpc_config_get self.rpc_handler_map['^/config/set$'] = self.rpc_config_set self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete for table_name in DATABASE_TABLES.keys(): self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a campaign_id field for table_name in db_models.get_tables_with_column_id('campaign_id'): self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a message_id field for table_name in db_models.get_tables_with_column_id('message_id'): self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows def rpc_ping(self): """ An RPC method that can be used by clients to assert the status and responsiveness of this server. :return: This method always returns True. :rtype: bool """ return True def rpc_client_initialize(self): """ Initialize any client information necessary. :return: This method always returns True. :rtype: bool """ username = self.basic_auth_user if not username: return True session = db_manager.Session() if not db_manager.get_row_by_id(session, db_models.User, username): user = db_models.User(id=username) session.add(user) session.commit() session.close() return True def rpc_shutdown(self): """ This method can be used to shut down the server. This function will return, however no subsequent requests will be processed. """ shutdown_thread = threading.Thread(target=self.server.shutdown) shutdown_thread.start() return def rpc_version(self): """ Get the version information of the server. 
		This returns a dictionary with keys of version, version_info and
		rpc_api_version. These values are provided for the client to determine
		compatibility.

		:return: A dictionary with version information.
		:rtype: dict
		"""
		vinfo = {'version': version.version, 'version_info': version.version_info._asdict()}
		vinfo['rpc_api_version'] = version.rpc_api_version
		return vinfo

	def rpc_config_get(self, option_name):
		"""
		Retrieve a value from the server's configuration.

		:param str option_name: The name of the configuration option.
		:return: The option's value.
		"""
		if isinstance(option_name, (list, tuple)):
			option_names = option_name
			option_values = {}
			for option_name in option_names:
				if self.config.has_option(option_name):
					option_values[option_name] = self.config.get(option_name)
			return option_values
		elif self.config.has_option(option_name):
			return self.config.get(option_name)
		return

	def rpc_config_set(self, options):
		"""
		Set options in the server's configuration. Any changes to the server's
		configuration are not written to disk.

		:param dict options: A dictionary of option names and values
		"""
		for option_name, option_value in options.items():
			self.config.set(option_name, option_value)
		return

	def rpc_campaign_new(self, name):
		"""
		Create a new King Phisher campaign and initialize the database
		information.

		:param str name: The new campaign's name.
		:return: The ID of the new campaign.
		:rtype: int
		"""
		session = db_manager.Session()
		campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user)
		session.add(campaign)
		session.commit()
		return campaign.id

	def rpc_campaign_alerts_is_subscribed(self, campaign_id):
		"""
		Check if the user is subscribed to alerts for the specified campaign.

		:param int campaign_id: The ID of the campaign.
		:return: The alert subscription status.
		:rtype: bool
		"""
		username = self.basic_auth_user
		session = db_manager.Session()
		query = session.query(db_models.AlertSubscription)
		query = query.filter_by(campaign_id=campaign_id, user_id=username)
		result = query.count()
		session.close()
		return result

	def rpc_campaign_alerts_subscribe(self, campaign_id):
		"""
		Subscribe to alerts for the specified campaign.

		:param int campaign_id: The ID of the campaign.
		"""
		username = self.basic_auth_user
		session = db_manager.Session()
		query = session.query(db_models.AlertSubscription)
		query = query.filter_by(campaign_id=campaign_id, user_id=username)
		if query.count() == 0:
			subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username)
			session.add(subscription)
			session.commit()
		session.close()
		return

	def rpc_campaign_alerts_unsubscribe(self, campaign_id):
		"""
		Unsubscribe from alerts for the specified campaign.

		:param int campaign_id: The ID of the campaign.
		"""
		username = self.basic_auth_user
		session = db_manager.Session()
		query = session.query(db_models.AlertSubscription)
		query = query.filter_by(campaign_id=campaign_id, user_id=username)
		subscription = query.first()
		if subscription:
			session.delete(subscription)
			session.commit()
		session.close()
		return

	def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
		"""
		Add a landing page for the specified campaign. Landing pages refer
		to resources that, when visited by a user, should cause the visit
		counter to be incremented.

		:param int campaign_id: The ID of the campaign.
		:param str hostname: The VHOST for the request.
		:param str page: The request resource.
""" page = page.lstrip('/') session = db_manager.Session() query = session.query(db_models.LandingPage) query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page) if query.count() == 0: landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page) session.add(landing_page) session.commit() session.close() return def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name): """ Record a message that has been sent as part of a campaign. These details can be retrieved later for value substitution in template pages. :param int campaign_id: The ID of the campaign. :param str email_id: The message id of the sent email. :param str target_email: The email address that the message was sent to. :param str company_name: The company name value for the message. :param str first_name: The first name of the message's recipient. :param str last_name: The last name of the message's recipient. """ session = db_manager.Session() message = db_models.Message() message.id = email_id message.campaign_id = campaign_id message.target_email = target_email message.company_name = company_name message.first_name = first_name message.last_name = last_name session.add(message) session.commit() session.close() return def rpc_campaign_delete(self, campaign_id): """ Remove a campaign from the database and delete all associated information with it. .. warning:: This action can not be reversed and there is no confirmation before it takes place. """ session = db_manager.Session() session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)) session.commit() session.close() return def rpc_database_count_rows(self, *args): """ Get a count of the rows in the specified table where the search criteria matches. :return: The number of matching rows. :rtype: int """ args = list(args) fields = self.path.split('/')[1:-2] assert len(fields) == len(args) table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) result = query.count() session.close() return result def rpc_database_get_rows(self, *args): """ Retrieve the rows from the specified table where the search criteria matches. :return: A dictionary with columns and rows keys. :rtype: dict """ args = list(args) offset = 0 fields = self.path.split('/')[1:-2] if len(args) == (len(fields) + 1): offset = (args.pop() * VIEW_ROW_COUNT) assert len(fields) == len(args) table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table # it's critical that the columns are in the order that the client is expecting columns = DATABASE_TABLES[table_name] rows = [] session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) for row in query[offset:offset + VIEW_ROW_COUNT]: rows.append([getattr(row, c) for c in columns]) session.close() if not len(rows): return None return {'columns': columns, 'rows': rows} def rpc_database_delete_row_by_id(self, row_id): """ Delete a row from a table with the specified value in the id column. :param row_id: The id value. 
""" table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() try: session.delete(db_manager.get_row_by_id(session, table, row_id)) session.commit() finally: session.close() return def rpc_database_delete_rows_by_id(self, row_ids): """ Delete multiple rows from a table with the specified values in the id column. If a row id specified in *row_ids* does not exist, then it will be skipped and no error will be thrown. :param list row_ids: The row ids to delete. :return: The row ids that were deleted. :rtype: list """ table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3]) assert table deleted_rows = [] session = db_manager.Session() try: for row_id in row_ids: row = db_manager.get_row_by_id(session, table, row_id) if not row: continue session.delete(row) deleted_rows.append(row_id) session.commit() finally: session.close() return deleted_rows def rpc_database_get_row_by_id(self, row_id): """ Retrieve a row from a given table with the specified value in the id column. :param row_id: The id value. :return: The specified row data. :rtype: dict """ table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table columns = DATABASE_TABLES[table_name] session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if row: row = dict(zip(columns, (getattr(row, c) for c in columns))) session.close() return row def rpc_database_insert_row(self, keys, values): """ Insert a new row into the specified table. :param tuple keys: The column names of *values*. :param tuple values: The values to be inserted in the row. """ if not isinstance(keys, (list, tuple)): keys = (keys,) if not isinstance(values, (list, tuple)): values = (values,) assert len(keys) == len(values) table_name = self.path.split('/')[-2] for key, value in zip(keys, values): assert key in DATABASE_TABLES[table_name] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table session = db_manager.Session() row = table() for key, value in zip(keys, values): setattr(row, key, value) session.add(row) session.close() return def rpc_database_set_row_value(self, row_id, keys, values): """ Set values for a row in the specified table with an id of *row_id*. :param tuple keys: The column names of *values*. :param tuple values: The values to be updated in the row. """ if not isinstance(keys, (list, tuple)): keys = (keys,) if not isinstance(values, (list, tuple)): values = (values,) assert len(keys) == len(values) table_name = self.path.split('/')[-2] for key, value in zip(keys, values): assert key in DATABASE_TABLES[table_name] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if not row: session.close() assert row for key, value in zip(keys, values): setattr(row, key, value) session.commit() session.close() return def rpc_geoip_lookup(self, ip, lang=None): """ Look up an IP address in the servers GeoIP database. If the IP address can not be found in the database, None will be returned. :param str ip: The IP address to look up. :param str lang: The language to prefer for regional names. :return: The geographic information for the specified IP address. :rtype: dict """ try: result = geoip.lookup(ip, lang=lang) except geoip.AddressNotFoundError: result = None return result def rpc_geoip_lookup_multi(self, ips, lang=None): """ Look up multiple IP addresses in the servers GeoIP database. 
        Each IP address that cannot be found in the database will have its
        result set to None.

        :param list ips: The list of IP addresses to look up.
        :param str lang: The language to prefer for regional names.
        :return: A dictionary containing the results keyed by the specified
            IP addresses.
        :rtype: dict
        """
        results = {}
        for ip in ips:
            try:
                result = geoip.lookup(ip, lang=lang)
            except geoip.AddressNotFoundError:
                result = None
            results[ip] = result
        return results
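The handler map built in install_handlers above keys bound methods by regular-expression patterns over the request path. A minimal, self-contained sketch of how such a map can be matched against an incoming path; the dispatch helper and the lambda handlers here are illustrative stand-ins, not King Phisher's actual lookup code:

import re

# Hypothetical, simplified handler map; the real map stores bound methods.
rpc_handler_map = {
    '^/ping$': lambda: True,
    '^/version$': lambda: {'version': '0.0.0'},
}

def dispatch(path):
    # Scan the map and invoke the first handler whose pattern matches the path.
    for pattern, handler in rpc_handler_map.items():
        if re.match(pattern, path):
            return handler()
    raise ValueError('no handler for ' + path)

print(dispatch('/ping'))  # True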
DATABASE_TABLES = db_models.DATABASE_TABLES
DATABASE_TABLE_OBJECTS = db_models.DATABASE_TABLE_OBJECTS
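The table-driven handlers such as rpc_database_count_rows and rpc_database_get_rows derive their SQL filter from the request path itself. A worked example of that string manipulation in isolation, using an assumed path and argument value:

# For a path such as '/campaign/messages/count', the segments between the
# leading slash and the trailing '<table>/count' pair name the filter fields.
path = '/campaign/messages/count'
args = [42]  # positional RPC arguments, e.g. a campaign ID

fields = path.split('/')[1:-2]    # ['campaign']
table_name = path.split('/')[-2]  # 'messages'
filters = dict(zip((f + '_id' for f in fields), args))
print(table_name, filters)        # messages {'campaign_id': 42}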
random_line_split
server_rpc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/server/server_rpc.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import threading from king_phisher import geoip from king_phisher import version from king_phisher.server.database import manager as db_manager from king_phisher.server.database import models as db_models VIEW_ROW_COUNT = 50 """The default number of rows to return when one of the /view methods are called.""" DATABASE_TABLES = db_models.DATABASE_TABLES DATABASE_TABLE_OBJECTS = db_models.DATABASE_TABLE_OBJECTS class KingPhisherRequestHandlerRPC(object): """ This superclass of :py:class:`.KingPhisherRequestHandler` maintains all of the RPC call back functions. 
:RPC API: :ref:`rpc-api-label` """ def install_handlers(self): super(KingPhisherRequestHandlerRPC, self).install_handlers() self.rpc_handler_map['^/ping$'] = self.rpc_ping self.rpc_handler_map['^/shutdown$'] = self.rpc_shutdown self.rpc_handler_map['^/version$'] = self.rpc_version self.rpc_handler_map['^/geoip/lookup$'] = self.rpc_geoip_lookup self.rpc_handler_map['^/geoip/lookup/multi$'] = self.rpc_geoip_lookup_multi self.rpc_handler_map['^/client/initialize$'] = self.rpc_client_initialize self.rpc_handler_map['^/config/get$'] = self.rpc_config_get self.rpc_handler_map['^/config/set$'] = self.rpc_config_set self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete for table_name in DATABASE_TABLES.keys(): self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a campaign_id field for table_name in db_models.get_tables_with_column_id('campaign_id'): self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a message_id field for table_name in db_models.get_tables_with_column_id('message_id'): self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows def rpc_ping(self): """ An RPC method that can be used by clients to assert the status and responsiveness of this server. :return: This method always returns True. :rtype: bool """ return True def rpc_client_initialize(self): """ Initialize any client information necessary. :return: This method always returns True. :rtype: bool """ username = self.basic_auth_user if not username: return True session = db_manager.Session() if not db_manager.get_row_by_id(session, db_models.User, username): user = db_models.User(id=username) session.add(user) session.commit() session.close() return True def rpc_shutdown(self): """ This method can be used to shut down the server. This function will return, however no subsequent requests will be processed. """ shutdown_thread = threading.Thread(target=self.server.shutdown) shutdown_thread.start() return def rpc_version(self): """ Get the version information of the server. This returns a dictionary with keys of version, version_info and rpc_api_version. These values are provided for the client to determine compatibility. :return: A dictionary with version information. 
:rtype: dict """ vinfo = {'version': version.version, 'version_info': version.version_info._asdict()} vinfo['rpc_api_version'] = version.rpc_api_version return vinfo def rpc_config_get(self, option_name): """ Retrieve a value from the server's configuration. :param str option_name: The name of the configuration option. :return: The option's value. """ if isinstance(option_name, (list, tuple)): option_names = option_name option_values = {} for option_name in option_names: if self.config.has_option(option_name): option_values[option_name] = self.config.get(option_name) return option_values elif self.config.has_option(option_name): return self.config.get(option_name) return def rpc_config_set(self, options): """ Set options in the server's configuration. Any changes to the server's configuration are not written to disk. :param dict options: A dictionary of option names and values """ for option_name, option_value in options.items(): self.config.set(option_name, option_value) return def rpc_campaign_new(self, name): """ Create a new King Phisher campaign and initialize the database information. :param str name: The new campaign's name. :return: The ID of the new campaign. :rtype: int """ session = db_manager.Session() campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user) session.add(campaign) session.commit() return campaign.id def rpc_campaign_alerts_is_subscribed(self, campaign_id): """ Check if the user is subscribed to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. :return: The alert subscription status. :rtype: bool """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) result = query.count() session.close() return result def rpc_campaign_alerts_subscribe(self, campaign_id): """ Subscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) if query.count() == 0: subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username) session.add(subscription) session.commit() session.close() return def rpc_campaign_alerts_unsubscribe(self, campaign_id): """ Unsubscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) subscription = query.first() if subscription: session.delete(subscription) session.commit() session.close() return def rpc_campaign_landing_page_new(self, campaign_id, hostname, page):
def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name): """ Record a message that has been sent as part of a campaign. These details can be retrieved later for value substitution in template pages. :param int campaign_id: The ID of the campaign. :param str email_id: The message id of the sent email. :param str target_email: The email address that the message was sent to. :param str company_name: The company name value for the message. :param str first_name: The first name of the message's recipient. :param str last_name: The last name of the message's recipient. """ session = db_manager.Session() message = db_models.Message() message.id = email_id message.campaign_id = campaign_id message.target_email = target_email message.company_name = company_name message.first_name = first_name message.last_name = last_name session.add(message) session.commit() session.close() return def rpc_campaign_delete(self, campaign_id): """ Remove a campaign from the database and delete all associated information with it. .. warning:: This action can not be reversed and there is no confirmation before it takes place. """ session = db_manager.Session() session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)) session.commit() session.close() return def rpc_database_count_rows(self, *args): """ Get a count of the rows in the specified table where the search criteria matches. :return: The number of matching rows. :rtype: int """ args = list(args) fields = self.path.split('/')[1:-2] assert len(fields) == len(args) table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) result = query.count() session.close() return result def rpc_database_get_rows(self, *args): """ Retrieve the rows from the specified table where the search criteria matches. :return: A dictionary with columns and rows keys. :rtype: dict """ args = list(args) offset = 0 fields = self.path.split('/')[1:-2] if len(args) == (len(fields) + 1): offset = (args.pop() * VIEW_ROW_COUNT) assert len(fields) == len(args) table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table # it's critical that the columns are in the order that the client is expecting columns = DATABASE_TABLES[table_name] rows = [] session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) for row in query[offset:offset + VIEW_ROW_COUNT]: rows.append([getattr(row, c) for c in columns]) session.close() if not len(rows): return None return {'columns': columns, 'rows': rows} def rpc_database_delete_row_by_id(self, row_id): """ Delete a row from a table with the specified value in the id column. :param row_id: The id value. """ table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() try: session.delete(db_manager.get_row_by_id(session, table, row_id)) session.commit() finally: session.close() return def rpc_database_delete_rows_by_id(self, row_ids): """ Delete multiple rows from a table with the specified values in the id column. If a row id specified in *row_ids* does not exist, then it will be skipped and no error will be thrown. :param list row_ids: The row ids to delete. :return: The row ids that were deleted. 
:rtype: list """ table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3]) assert table deleted_rows = [] session = db_manager.Session() try: for row_id in row_ids: row = db_manager.get_row_by_id(session, table, row_id) if not row: continue session.delete(row) deleted_rows.append(row_id) session.commit() finally: session.close() return deleted_rows def rpc_database_get_row_by_id(self, row_id): """ Retrieve a row from a given table with the specified value in the id column. :param row_id: The id value. :return: The specified row data. :rtype: dict """ table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table columns = DATABASE_TABLES[table_name] session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if row: row = dict(zip(columns, (getattr(row, c) for c in columns))) session.close() return row def rpc_database_insert_row(self, keys, values): """ Insert a new row into the specified table. :param tuple keys: The column names of *values*. :param tuple values: The values to be inserted in the row. """ if not isinstance(keys, (list, tuple)): keys = (keys,) if not isinstance(values, (list, tuple)): values = (values,) assert len(keys) == len(values) table_name = self.path.split('/')[-2] for key, value in zip(keys, values): assert key in DATABASE_TABLES[table_name] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table session = db_manager.Session() row = table() for key, value in zip(keys, values): setattr(row, key, value) session.add(row) session.close() return def rpc_database_set_row_value(self, row_id, keys, values): """ Set values for a row in the specified table with an id of *row_id*. :param tuple keys: The column names of *values*. :param tuple values: The values to be updated in the row. """ if not isinstance(keys, (list, tuple)): keys = (keys,) if not isinstance(values, (list, tuple)): values = (values,) assert len(keys) == len(values) table_name = self.path.split('/')[-2] for key, value in zip(keys, values): assert key in DATABASE_TABLES[table_name] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if not row: session.close() assert row for key, value in zip(keys, values): setattr(row, key, value) session.commit() session.close() return def rpc_geoip_lookup(self, ip, lang=None): """ Look up an IP address in the servers GeoIP database. If the IP address can not be found in the database, None will be returned. :param str ip: The IP address to look up. :param str lang: The language to prefer for regional names. :return: The geographic information for the specified IP address. :rtype: dict """ try: result = geoip.lookup(ip, lang=lang) except geoip.AddressNotFoundError: result = None return result def rpc_geoip_lookup_multi(self, ips, lang=None): """ Look up multiple IP addresses in the servers GeoIP database. Each IP address that can not be found in the database will have its result set to None. :param list ips: The list of IP addresses to look up. :param str lang: The language to prefer for regional names. :return: A dictionary containing the results keyed by the specified IP addresses. :rtype: dict """ results = {} for ip in ips: try: result = geoip.lookup(ip, lang=lang) except geoip.AddressNotFoundError: result = None results[ip] = result return results
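rpc_database_get_rows pages its results: an optional trailing argument is a page number that is converted into a row offset of VIEW_ROW_COUNT rows. A small sketch of just that paging arithmetic, with a plain list standing in for the database query:

VIEW_ROW_COUNT = 50

def get_page(rows, page):
    # Mirrors rpc_database_get_rows: the page number becomes a row offset and
    # at most VIEW_ROW_COUNT rows are returned per call.
    offset = page * VIEW_ROW_COUNT
    return rows[offset:offset + VIEW_ROW_COUNT]

data = list(range(120))
print(len(get_page(data, 0)), len(get_page(data, 2)))  # 50 20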
""" Add a landing page for the specified campaign. Landing pages refer to resources that when visited by a user should cause the visit counter to be incremented. :param int campaign_id: The ID of the campaign. :param str hostname: The VHOST for the request. :param str page: The request resource. """ page = page.lstrip('/') session = db_manager.Session() query = session.query(db_models.LandingPage) query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page) if query.count() == 0: landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page) session.add(landing_page) session.commit() session.close() return
identifier_body
server_rpc.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/server/server_rpc.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import threading from king_phisher import geoip from king_phisher import version from king_phisher.server.database import manager as db_manager from king_phisher.server.database import models as db_models VIEW_ROW_COUNT = 50 """The default number of rows to return when one of the /view methods are called.""" DATABASE_TABLES = db_models.DATABASE_TABLES DATABASE_TABLE_OBJECTS = db_models.DATABASE_TABLE_OBJECTS class KingPhisherRequestHandlerRPC(object): """ This superclass of :py:class:`.KingPhisherRequestHandler` maintains all of the RPC call back functions. 
:RPC API: :ref:`rpc-api-label` """ def install_handlers(self): super(KingPhisherRequestHandlerRPC, self).install_handlers() self.rpc_handler_map['^/ping$'] = self.rpc_ping self.rpc_handler_map['^/shutdown$'] = self.rpc_shutdown self.rpc_handler_map['^/version$'] = self.rpc_version self.rpc_handler_map['^/geoip/lookup$'] = self.rpc_geoip_lookup self.rpc_handler_map['^/geoip/lookup/multi$'] = self.rpc_geoip_lookup_multi self.rpc_handler_map['^/client/initialize$'] = self.rpc_client_initialize self.rpc_handler_map['^/config/get$'] = self.rpc_config_get self.rpc_handler_map['^/config/set$'] = self.rpc_config_set self.rpc_handler_map['^/campaign/alerts/is_subscribed$'] = self.rpc_campaign_alerts_is_subscribed self.rpc_handler_map['^/campaign/alerts/subscribe$'] = self.rpc_campaign_alerts_subscribe self.rpc_handler_map['^/campaign/alerts/unsubscribe$'] = self.rpc_campaign_alerts_unsubscribe self.rpc_handler_map['^/campaign/landing_page/new$'] = self.rpc_campaign_landing_page_new self.rpc_handler_map['^/campaign/message/new$'] = self.rpc_campaign_message_new self.rpc_handler_map['^/campaign/new$'] = self.rpc_campaign_new self.rpc_handler_map['^/campaign/delete$'] = self.rpc_campaign_delete for table_name in DATABASE_TABLES.keys(): self.rpc_handler_map['^/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/' + table_name + '/delete$'] = self.rpc_database_delete_row_by_id self.rpc_handler_map['^/' + table_name + '/delete/multi'] = self.rpc_database_delete_rows_by_id self.rpc_handler_map['^/' + table_name + '/get$'] = self.rpc_database_get_row_by_id self.rpc_handler_map['^/' + table_name + '/insert'] = self.rpc_database_insert_row self.rpc_handler_map['^/' + table_name + '/set$'] = self.rpc_database_set_row_value self.rpc_handler_map['^/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a campaign_id field for table_name in db_models.get_tables_with_column_id('campaign_id'): self.rpc_handler_map['^/campaign/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/campaign/' + table_name + '/view$'] = self.rpc_database_get_rows # Tables with a message_id field for table_name in db_models.get_tables_with_column_id('message_id'): self.rpc_handler_map['^/message/' + table_name + '/count$'] = self.rpc_database_count_rows self.rpc_handler_map['^/message/' + table_name + '/view$'] = self.rpc_database_get_rows def rpc_ping(self): """ An RPC method that can be used by clients to assert the status and responsiveness of this server. :return: This method always returns True. :rtype: bool """ return True def rpc_client_initialize(self): """ Initialize any client information necessary. :return: This method always returns True. :rtype: bool """ username = self.basic_auth_user if not username: return True session = db_manager.Session() if not db_manager.get_row_by_id(session, db_models.User, username): user = db_models.User(id=username) session.add(user) session.commit() session.close() return True def rpc_shutdown(self): """ This method can be used to shut down the server. This function will return, however no subsequent requests will be processed. """ shutdown_thread = threading.Thread(target=self.server.shutdown) shutdown_thread.start() return def rpc_version(self): """ Get the version information of the server. This returns a dictionary with keys of version, version_info and rpc_api_version. These values are provided for the client to determine compatibility. :return: A dictionary with version information. 
:rtype: dict """ vinfo = {'version': version.version, 'version_info': version.version_info._asdict()} vinfo['rpc_api_version'] = version.rpc_api_version return vinfo def rpc_config_get(self, option_name): """ Retrieve a value from the server's configuration. :param str option_name: The name of the configuration option. :return: The option's value. """ if isinstance(option_name, (list, tuple)): option_names = option_name option_values = {} for option_name in option_names: if self.config.has_option(option_name): option_values[option_name] = self.config.get(option_name) return option_values elif self.config.has_option(option_name): return self.config.get(option_name) return def rpc_config_set(self, options): """ Set options in the server's configuration. Any changes to the server's configuration are not written to disk. :param dict options: A dictionary of option names and values """ for option_name, option_value in options.items(): self.config.set(option_name, option_value) return def rpc_campaign_new(self, name): """ Create a new King Phisher campaign and initialize the database information. :param str name: The new campaign's name. :return: The ID of the new campaign. :rtype: int """ session = db_manager.Session() campaign = db_models.Campaign(name=name, user_id=self.basic_auth_user) session.add(campaign) session.commit() return campaign.id def rpc_campaign_alerts_is_subscribed(self, campaign_id): """ Check if the user is subscribed to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. :return: The alert subscription status. :rtype: bool """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) result = query.count() session.close() return result def rpc_campaign_alerts_subscribe(self, campaign_id): """ Subscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) if query.count() == 0: subscription = db_models.AlertSubscription(campaign_id=campaign_id, user_id=username) session.add(subscription) session.commit() session.close() return def rpc_campaign_alerts_unsubscribe(self, campaign_id): """ Unsubscribe to alerts for the specified campaign. :param int campaign_id: The ID of the campaign. """ username = self.basic_auth_user session = db_manager.Session() query = session.query(db_models.AlertSubscription) query = query.filter_by(campaign_id=campaign_id, user_id=username) subscription = query.first() if subscription: session.delete(subscription) session.commit() session.close() return def rpc_campaign_landing_page_new(self, campaign_id, hostname, page): """ Add a landing page for the specified campaign. Landing pages refer to resources that when visited by a user should cause the visit counter to be incremented. :param int campaign_id: The ID of the campaign. :param str hostname: The VHOST for the request. :param str page: The request resource. 
""" page = page.lstrip('/') session = db_manager.Session() query = session.query(db_models.LandingPage) query = query.filter_by(campaign_id=campaign_id, hostname=hostname, page=page) if query.count() == 0: landing_page = db_models.LandingPage(campaign_id=campaign_id, hostname=hostname, page=page) session.add(landing_page) session.commit() session.close() return def rpc_campaign_message_new(self, campaign_id, email_id, target_email, company_name, first_name, last_name): """ Record a message that has been sent as part of a campaign. These details can be retrieved later for value substitution in template pages. :param int campaign_id: The ID of the campaign. :param str email_id: The message id of the sent email. :param str target_email: The email address that the message was sent to. :param str company_name: The company name value for the message. :param str first_name: The first name of the message's recipient. :param str last_name: The last name of the message's recipient. """ session = db_manager.Session() message = db_models.Message() message.id = email_id message.campaign_id = campaign_id message.target_email = target_email message.company_name = company_name message.first_name = first_name message.last_name = last_name session.add(message) session.commit() session.close() return def rpc_campaign_delete(self, campaign_id): """ Remove a campaign from the database and delete all associated information with it. .. warning:: This action can not be reversed and there is no confirmation before it takes place. """ session = db_manager.Session() session.delete(db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)) session.commit() session.close() return def rpc_database_count_rows(self, *args): """ Get a count of the rows in the specified table where the search criteria matches. :return: The number of matching rows. :rtype: int """ args = list(args) fields = self.path.split('/')[1:-2] assert len(fields) == len(args) table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) result = query.count() session.close() return result def rpc_database_get_rows(self, *args): """ Retrieve the rows from the specified table where the search criteria matches. :return: A dictionary with columns and rows keys. :rtype: dict """ args = list(args) offset = 0 fields = self.path.split('/')[1:-2] if len(args) == (len(fields) + 1): offset = (args.pop() * VIEW_ROW_COUNT) assert len(fields) == len(args) table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table # it's critical that the columns are in the order that the client is expecting columns = DATABASE_TABLES[table_name] rows = [] session = db_manager.Session() query = session.query(table) query = query.filter_by(**dict(zip((f + '_id' for f in fields), args))) for row in query[offset:offset + VIEW_ROW_COUNT]: rows.append([getattr(row, c) for c in columns]) session.close() if not len(rows): return None return {'columns': columns, 'rows': rows} def rpc_database_delete_row_by_id(self, row_id): """ Delete a row from a table with the specified value in the id column. :param row_id: The id value. 
""" table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-2]) assert table session = db_manager.Session() try: session.delete(db_manager.get_row_by_id(session, table, row_id)) session.commit() finally: session.close() return def rpc_database_delete_rows_by_id(self, row_ids): """ Delete multiple rows from a table with the specified values in the id column. If a row id specified in *row_ids* does not exist, then it will be skipped and no error will be thrown. :param list row_ids: The row ids to delete. :return: The row ids that were deleted. :rtype: list """ table = DATABASE_TABLE_OBJECTS.get(self.path.split('/')[-3]) assert table deleted_rows = [] session = db_manager.Session() try: for row_id in row_ids: row = db_manager.get_row_by_id(session, table, row_id) if not row: continue session.delete(row) deleted_rows.append(row_id) session.commit() finally: session.close() return deleted_rows def rpc_database_get_row_by_id(self, row_id): """ Retrieve a row from a given table with the specified value in the id column. :param row_id: The id value. :return: The specified row data. :rtype: dict """ table_name = self.path.split('/')[-2] table = DATABASE_TABLE_OBJECTS.get(table_name) assert table columns = DATABASE_TABLES[table_name] session = db_manager.Session() row = db_manager.get_row_by_id(session, table, row_id) if row: row = dict(zip(columns, (getattr(row, c) for c in columns))) session.close() return row def
(self, keys, values):
        """
        Insert a new row into the specified table.

        :param tuple keys: The column names of *values*.
        :param tuple values: The values to be inserted in the row.
        """
        if not isinstance(keys, (list, tuple)):
            keys = (keys,)
        if not isinstance(values, (list, tuple)):
            values = (values,)
        assert len(keys) == len(values)
        table_name = self.path.split('/')[-2]
        for key, value in zip(keys, values):
            assert key in DATABASE_TABLES[table_name]
        table = DATABASE_TABLE_OBJECTS.get(table_name)
        assert table
        session = db_manager.Session()
        row = table()
        for key, value in zip(keys, values):
            setattr(row, key, value)
        session.add(row)
        # commit so the new row is persisted before the session closes
        session.commit()
        session.close()
        return

    def rpc_database_set_row_value(self, row_id, keys, values):
        """
        Set values for a row in the specified table with an id of *row_id*.

        :param tuple keys: The column names of *values*.
        :param tuple values: The values to be updated in the row.
        """
        if not isinstance(keys, (list, tuple)):
            keys = (keys,)
        if not isinstance(values, (list, tuple)):
            values = (values,)
        assert len(keys) == len(values)
        table_name = self.path.split('/')[-2]
        for key, value in zip(keys, values):
            assert key in DATABASE_TABLES[table_name]
        table = DATABASE_TABLE_OBJECTS.get(table_name)
        assert table
        session = db_manager.Session()
        row = db_manager.get_row_by_id(session, table, row_id)
        if not row:
            session.close()
            assert row
        for key, value in zip(keys, values):
            setattr(row, key, value)
        session.commit()
        session.close()
        return

    def rpc_geoip_lookup(self, ip, lang=None):
        """
        Look up an IP address in the server's GeoIP database. If the IP
        address cannot be found in the database, None will be returned.

        :param str ip: The IP address to look up.
        :param str lang: The language to prefer for regional names.
        :return: The geographic information for the specified IP address.
        :rtype: dict
        """
        try:
            result = geoip.lookup(ip, lang=lang)
        except geoip.AddressNotFoundError:
            result = None
        return result

    def rpc_geoip_lookup_multi(self, ips, lang=None):
        """
        Look up multiple IP addresses in the server's GeoIP database.
        Each IP address that cannot be found in the database will have its
        result set to None.

        :param list ips: The list of IP addresses to look up.
        :param str lang: The language to prefer for regional names.
        :return: A dictionary containing the results keyed by the specified
            IP addresses.
        :rtype: dict
        """
        results = {}
        for ip in ips:
            try:
                result = geoip.lookup(ip, lang=lang)
            except geoip.AddressNotFoundError:
                result = None
            results[ip] = result
        return results
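Both rpc_database_insert_row and rpc_database_set_row_value accept either a single column/value pair or parallel sequences, and normalize scalars into one-element tuples before zipping. That normalization in isolation, as a standalone sketch (the helper name is illustrative):

def normalize(keys, values):
    # Wrap scalars so the zip() always sees sequences of equal length,
    # exactly as the two RPC methods do before applying the values.
    if not isinstance(keys, (list, tuple)):
        keys = (keys,)
    if not isinstance(values, (list, tuple)):
        values = (values,)
    assert len(keys) == len(values)
    return dict(zip(keys, values))

print(normalize('name', 'demo'))            # {'name': 'demo'}
print(normalize(('name', 'id'), ('x', 3)))  # {'name': 'x', 'id': 3}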
rpc_database_insert_row
identifier_name
server.rs
use crate::adapter::Adapter; use crate::socket::{ subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent, }; use crate::transport::{Transport, TransportCreateData, TransportKind}; use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie}; use dashmap::DashMap; use engine_io_parser::packet::{Packet, PacketData}; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::{broadcast, mpsc}; use uuid::Uuid; pub const BUFFER_CONST: usize = 32; #[derive(Debug, Clone, PartialEq)] pub struct ServerOptions { pub ping_timeout: u32, pub ping_interval: u32, pub upgrade_timeout: u32, pub transports: Vec<TransportKind>, pub allow_upgrades: bool, pub initial_packet: Option<Packet>, // TODO: implement this // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>, pub cookie: Option<CookieOptions>, // TODO: node ws-specific options: // - maxHttpBufferSize // - perMessageDeflate // - httpCompression // -- cors pub buffer_factor: usize, } #[derive(Debug, Clone, PartialEq)] pub struct CookieOptions { pub name: String, pub path: String, pub http_only: bool, } #[derive(Debug, Clone)] pub struct EventSenders { // Event sender to external owner server: bmrng::RequestSender<ServerEvent, Packet>, // server: broadcast::Sender<ServerEvent>, /// Event sender to Socket instances. cloned and passed over client: mpsc::Sender<SocketEvent>, } #[derive(Debug)] pub enum ServerState { Unsubscribed { socket_event_receiver: mpsc::Receiver<SocketEvent>, engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>, }, Subscribed, } pub struct Server<A: 'static + Adapter> { state: Arc<Mutex<ServerState>>, // TODO: don't use a mutex here, instead have an internal socket state clients: Arc<DashMap<String, Arc<Socket<A>>>>, event_senders: EventSenders, // TODO: ping timeout handler EngineIoSocketTimeoutHandler pub options: ServerOptions, } impl Default for ServerOptions { fn default() -> Self { ServerOptions { ping_timeout: 5000, ping_interval: 25000, upgrade_timeout: 10000, transports: vec![TransportKind::WebSocket, TransportKind::Polling], allow_upgrades: true, initial_packet: None, cookie: Some(CookieOptions::default()), // allow_request: None, buffer_factor: 2, } } } impl Default for CookieOptions { fn default() -> Self { CookieOptions { name: "io".to_owned(), path: "/".to_owned(), http_only: true, } } } #[derive(Display, Debug, Clone, PartialEq)] pub enum ServerEvent { /// Socket ID Connection { connection_id: String, }, Close { connection_id: String, reason: SocketCloseReason, }, Flush { connection_id: String, }, Drain { connection_id: String, }, Message { connection_id: String, context: Arc<RequestContext>, data: PacketData, }, Error { connection_id: String, }, } impl<A: 'static + Adapter> Server<A> { pub fn new(options: ServerOptions) -> Self { // To listen events from socket instances let (client_event_sender, client_event_receiver) = mpsc::channel(options.buffer_factor * BUFFER_CONST); // To send events to the owner of this Server instance let (server_event_sender, server_event_receiver) = bmrng::channel(options.buffer_factor * BUFFER_CONST); Server { state: Arc::new(Mutex::new(ServerState::Unsubscribed { socket_event_receiver: client_event_receiver, engine_event_receiver: server_event_receiver, })), clients: Arc::new(DashMap::new()), event_senders: EventSenders { server: server_event_sender, client: client_event_sender, }, options, } } pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet>
pub fn try_subscribe( &self, ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> { let mut state = self.state.lock().unwrap(); let old_state = std::mem::replace(&mut *state, ServerState::Subscribed); match old_state { ServerState::Subscribed => Err(AlreadySubscribedError), ServerState::Unsubscribed { socket_event_receiver, engine_event_receiver, } => { // First time calling subscribe, also start listening events from `Socket` instances self.subscribe_to_socket_events(socket_event_receiver); Ok(engine_event_receiver) } } // TODO: handle shutdown properly by receiving a shutdown signal // sending it to socket instances. } pub async fn close(&self) { // TODO: consider sending signals or dropping channels instead of closing them like this? // TODO: or drop the whole thing. The server, the sockets, everything. todo!(); // for socket in self.clients.iter() { // socket.value().close(true); // } } pub async fn close_socket(&self, connection_id: &str) { if let Some((_key, socket)) = self.clients.remove(connection_id) { // TODO: convert this to drop todo!(); // socket.close(true); } } // TODO: consider converting ack callbacks into optional async Results? // `connection_id` is an owned string just because of a Rust compiler issue. pub async fn send_packet_with_ack( &self, connection_id: String, packet: Packet, callback: Option<Callback>, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn send_packet( &self, connection_id: String, packet: Packet, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn handle_request( &self, context: RequestContext, body: Option<A::Body>, ) -> Result<A::Response, ServerError> { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); self.verify_request(sid_ref, false, context.transport_kind, context.http_method) .await?; if let Some(sid) = sid { let client = self.get_client_or_error(&sid)?; let response = client.handle_polling_request(context.clone(), body).await?; Ok(response) } else { let (sid, response) = self.handshake(context, HandshakeData::Polling).await?; Ok(response) } } /// Akin to `onWebSocket` from engine.io js // TODO: handle errors, socket closure etc. pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); if let Some(sid) = sid { // TODO: don't panic let client = self.get_client_or_error(&sid).expect("TODO: fix this"); client.maybe_upgrade(context, todo!()); // TODO: implement this! 
// let client = // TODO: call socket.maybe_upgrade() } else { self.handshake(context, HandshakeData::WebSocket { socket }) .await; todo!(); } } pub async fn verify_request( &self, sid: Option<&String>, upgrade: bool, transport_kind: TransportKind, http_method: HttpMethod, ) -> Result<(), ServerError> { if let Some(sid) = sid { let client = self.clients.get(sid); if let Some(client) = client { let client_transport_kind = client.get_transport_kind(); if !upgrade && Some(transport_kind) != client_transport_kind { return Err(ServerError::BadRequest); } } else { return Err(ServerError::UnknownSid); } } else { if http_method != HttpMethod::Get { return Err(ServerError::BadHandshakeMethod); } // FIXME: fix allow_request calls /*if let Some(validator) = &self.options.allow_request { // FIXME: pass some request parameters to this validator // to make it useful let valid = validator(); if !valid { return Err(ServerError::BadRequest); } }*/ } Ok(()) } /// Generate a new ID for a client. /// Note: This generates IDs in a different format from the original JS /// engine.io implementation, which uses a library called /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem /// to guarantee uniqueness. pub fn generate_id() -> String { Uuid::new_v4().to_hyphenated().to_string() } /// Returns the new client ID pub async fn handshake( &self, context: Arc<RequestContext>, data: HandshakeData<A::WsHandle>, ) -> Result<(String, A::Response), ServerError> { let sid = Self::generate_id(); let supports_binary = !context.query.contains_key("b64"); let jsonp = !supports_binary && !context.query.contains_key("j"); let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options( &self.options.cookie, sid.clone(), ))); let transport_create_data = match data { HandshakeData::Polling => TransportCreateData::Polling { jsonp }, HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket { supports_binary, socket, }, }; let socket = Arc::new(Socket::new( sid.clone(), context.clone(), self.event_senders.client.clone(), transport_create_data, )); self.clients.insert(sid.clone(), socket.clone()); socket.open(&self.options).await; // TODO: send this initial packet in the handshake request response? // so we'd need to return it to the adapter if let Some(initial_message_packet) = self.options.initial_packet.clone() { socket.send_packet(initial_message_packet, None).await; } subscribe_socket_to_transport_events(socket).await; let response = { let client = self.get_client_or_error(&sid)?; match client.get_transport_or_fail()?.as_ref() { Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?), _ => Err(ServerError::BadRequest), } }; // Emit a "connection" event. This is an internal event that's used by socket_io let _ = self .event_senders .server .clone() .send(ServerEvent::Connection { connection_id: sid.clone(), }); response.map(|response| Ok((sid, response)))? 
} pub fn clients_count(&self) -> usize { self.clients.len() } pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> { if let Some(client) = self.clients.get(id) { Ok(client.value().clone()) } else { Err(ServerError::UnknownSid) } } fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) { // TODO: listen for responder responses on fallible events let server_event_sender = self.event_senders.server.clone(); let clients = self.clients.clone(); tokio::spawn(async move { let mut receiver = client_event_receiver; while let Some(message) = receiver.recv().await { match message { SocketEvent::Close { socket_id, reason } => { clients.remove(&socket_id); let _ = server_event_sender.send(ServerEvent::Close { connection_id: socket_id, reason, }); } SocketEvent::Flush { socket_id } => { // Forward the Flush event to the external listener let _ = server_event_sender.send(ServerEvent::Flush { connection_id: socket_id, }); } SocketEvent::Drain { socket_id } => { // Forward the Drain event to the external listener let _ = server_event_sender.send(ServerEvent::Drain { connection_id: socket_id, }); } SocketEvent::Message { socket_id, context, data, } => { // Forward the Drain event to the external listener let _ = server_event_sender.send(ServerEvent::Message { connection_id: socket_id, context, data, }); } SocketEvent::Error { socket_id } => { let _ = server_event_sender.send(ServerEvent::Error { connection_id: socket_id, }); } _ => {} } } }); } fn subscribe_to_commands(&self) { // TODO: receive packet send requests using a MPSC listener ? todo!(); } } #[derive(Debug)] pub enum HandshakeData<S> where S: 'static, { Polling, WebSocket { socket: S }, } #[derive(Debug)] pub struct AlreadySubscribedError; impl std::fmt::Display for AlreadySubscribedError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Already subscribed to this server, cannot subscribe again" ) } } impl std::error::Error for AlreadySubscribedError {}
{ self.try_subscribe() .expect("Already subscribed to engine_io_server::Server") }
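The subscribe/try_subscribe pair shown in the Rust server enforces a single consumer: the state is swapped from Unsubscribed (which owns the event receivers) to Subscribed via mem::replace, and a second call fails. A rough Python analogue of that hand-over-once pattern, under the assumption that only the ownership transfer matters; all names here are illustrative:

class SubscribeOnce:
    def __init__(self, receiver):
        self._receiver = receiver  # owned until the first subscribe call

    def try_subscribe(self):
        # Hand the receiver to the first caller and refuse every later call,
        # mirroring the ServerState swap in the Rust code.
        if self._receiver is None:
            raise RuntimeError('already subscribed')
        receiver, self._receiver = self._receiver, None
        return receiver

source = SubscribeOnce(receiver=object())
source.try_subscribe()       # first call succeeds
try:
    source.try_subscribe()   # second call fails
except RuntimeError as exc:
    print(exc)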
identifier_body
server.rs
use crate::adapter::Adapter; use crate::socket::{ subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent, }; use crate::transport::{Transport, TransportCreateData, TransportKind}; use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie}; use dashmap::DashMap; use engine_io_parser::packet::{Packet, PacketData}; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::{broadcast, mpsc}; use uuid::Uuid; pub const BUFFER_CONST: usize = 32; #[derive(Debug, Clone, PartialEq)] pub struct ServerOptions { pub ping_timeout: u32, pub ping_interval: u32, pub upgrade_timeout: u32, pub transports: Vec<TransportKind>, pub allow_upgrades: bool, pub initial_packet: Option<Packet>, // TODO: implement this // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>, pub cookie: Option<CookieOptions>, // TODO: node ws-specific options: // - maxHttpBufferSize // - perMessageDeflate // - httpCompression // -- cors pub buffer_factor: usize, } #[derive(Debug, Clone, PartialEq)] pub struct CookieOptions { pub name: String, pub path: String, pub http_only: bool, } #[derive(Debug, Clone)] pub struct EventSenders { // Event sender to external owner server: bmrng::RequestSender<ServerEvent, Packet>, // server: broadcast::Sender<ServerEvent>, /// Event sender to Socket instances. cloned and passed over client: mpsc::Sender<SocketEvent>, } #[derive(Debug)] pub enum ServerState { Unsubscribed { socket_event_receiver: mpsc::Receiver<SocketEvent>, engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>, }, Subscribed, } pub struct Server<A: 'static + Adapter> { state: Arc<Mutex<ServerState>>, // TODO: don't use a mutex here, instead have an internal socket state clients: Arc<DashMap<String, Arc<Socket<A>>>>, event_senders: EventSenders, // TODO: ping timeout handler EngineIoSocketTimeoutHandler pub options: ServerOptions, } impl Default for ServerOptions { fn default() -> Self { ServerOptions { ping_timeout: 5000, ping_interval: 25000, upgrade_timeout: 10000, transports: vec![TransportKind::WebSocket, TransportKind::Polling], allow_upgrades: true, initial_packet: None, cookie: Some(CookieOptions::default()), // allow_request: None, buffer_factor: 2, } } } impl Default for CookieOptions { fn default() -> Self { CookieOptions { name: "io".to_owned(), path: "/".to_owned(), http_only: true, } } } #[derive(Display, Debug, Clone, PartialEq)] pub enum ServerEvent { /// Socket ID Connection { connection_id: String, }, Close { connection_id: String, reason: SocketCloseReason, }, Flush { connection_id: String, }, Drain { connection_id: String, }, Message { connection_id: String, context: Arc<RequestContext>, data: PacketData, }, Error { connection_id: String, }, } impl<A: 'static + Adapter> Server<A> { pub fn new(options: ServerOptions) -> Self { // To listen events from socket instances let (client_event_sender, client_event_receiver) = mpsc::channel(options.buffer_factor * BUFFER_CONST); // To send events to the owner of this Server instance let (server_event_sender, server_event_receiver) = bmrng::channel(options.buffer_factor * BUFFER_CONST); Server { state: Arc::new(Mutex::new(ServerState::Unsubscribed { socket_event_receiver: client_event_receiver, engine_event_receiver: server_event_receiver, })), clients: Arc::new(DashMap::new()), event_senders: EventSenders { server: server_event_sender, client: client_event_sender, }, options, } } pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> { self.try_subscribe() 
.expect("Already subscribed to engine_io_server::Server") } pub fn try_subscribe( &self, ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> { let mut state = self.state.lock().unwrap(); let old_state = std::mem::replace(&mut *state, ServerState::Subscribed); match old_state { ServerState::Subscribed => Err(AlreadySubscribedError), ServerState::Unsubscribed { socket_event_receiver, engine_event_receiver, } => { // First time calling subscribe, also start listening events from `Socket` instances self.subscribe_to_socket_events(socket_event_receiver); Ok(engine_event_receiver) } } // TODO: handle shutdown properly by receiving a shutdown signal // sending it to socket instances. } pub async fn close(&self) {
// TODO: or drop the whole thing. The server, the sockets, everything. todo!(); // for socket in self.clients.iter() { // socket.value().close(true); // } } pub async fn close_socket(&self, connection_id: &str) { if let Some((_key, socket)) = self.clients.remove(connection_id) { // TODO: convert this to drop todo!(); // socket.close(true); } } // TODO: consider converting ack callbacks into optional async Results? // `connection_id` is an owned string just because of a Rust compiler issue. pub async fn send_packet_with_ack( &self, connection_id: String, packet: Packet, callback: Option<Callback>, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn send_packet( &self, connection_id: String, packet: Packet, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn handle_request( &self, context: RequestContext, body: Option<A::Body>, ) -> Result<A::Response, ServerError> { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); self.verify_request(sid_ref, false, context.transport_kind, context.http_method) .await?; if let Some(sid) = sid { let client = self.get_client_or_error(&sid)?; let response = client.handle_polling_request(context.clone(), body).await?; Ok(response) } else { let (sid, response) = self.handshake(context, HandshakeData::Polling).await?; Ok(response) } } /// Akin to `onWebSocket` from engine.io js // TODO: handle errors, socket closure etc. pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); if let Some(sid) = sid { // TODO: don't panic let client = self.get_client_or_error(&sid).expect("TODO: fix this"); client.maybe_upgrade(context, todo!()); // TODO: implement this! // let client = // TODO: call socket.maybe_upgrade() } else { self.handshake(context, HandshakeData::WebSocket { socket }) .await; todo!(); } } pub async fn verify_request( &self, sid: Option<&String>, upgrade: bool, transport_kind: TransportKind, http_method: HttpMethod, ) -> Result<(), ServerError> { if let Some(sid) = sid { let client = self.clients.get(sid); if let Some(client) = client { let client_transport_kind = client.get_transport_kind(); if !upgrade && Some(transport_kind) != client_transport_kind { return Err(ServerError::BadRequest); } } else { return Err(ServerError::UnknownSid); } } else { if http_method != HttpMethod::Get { return Err(ServerError::BadHandshakeMethod); } // FIXME: fix allow_request calls /*if let Some(validator) = &self.options.allow_request { // FIXME: pass some request parameters to this validator // to make it useful let valid = validator(); if !valid { return Err(ServerError::BadRequest); } }*/ } Ok(()) } /// Generate a new ID for a client. /// Note: This generates IDs in a different format from the original JS /// engine.io implementation, which uses a library called /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem /// to guarantee uniqueness. 
    pub fn generate_id() -> String {
        Uuid::new_v4().to_hyphenated().to_string()
    }

    /// Returns the new client ID
    pub async fn handshake(
        &self,
        context: Arc<RequestContext>,
        data: HandshakeData<A::WsHandle>,
    ) -> Result<(String, A::Response), ServerError> {
        let sid = Self::generate_id();
        let supports_binary = !context.query.contains_key("b64");
        let jsonp = !supports_binary && !context.query.contains_key("j");

        let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options(
            &self.options.cookie,
            sid.clone(),
        )));

        let transport_create_data = match data {
            HandshakeData::Polling => TransportCreateData::Polling { jsonp },
            HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket {
                supports_binary,
                socket,
            },
        };

        let socket = Arc::new(Socket::new(
            sid.clone(),
            context.clone(),
            self.event_senders.client.clone(),
            transport_create_data,
        ));

        self.clients.insert(sid.clone(), socket.clone());
        socket.open(&self.options).await;

        // TODO: send this initial packet in the handshake request response?
        // so we'd need to return it to the adapter
        if let Some(initial_message_packet) = self.options.initial_packet.clone() {
            socket.send_packet(initial_message_packet, None).await;
        }

        subscribe_socket_to_transport_events(socket).await;

        let response = {
            let client = self.get_client_or_error(&sid)?;
            match client.get_transport_or_fail()?.as_ref() {
                Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?),
                _ => Err(ServerError::BadRequest),
            }
        };

        // Emit a "connection" event. This is an internal event that's used by socket_io
        let _ = self
            .event_senders
            .server
            .clone()
            .send(ServerEvent::Connection {
                connection_id: sid.clone(),
            });

        response.map(|response| Ok((sid, response)))?
    }

    pub fn clients_count(&self) -> usize {
        self.clients.len()
    }

    pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> {
        if let Some(client) = self.clients.get(id) {
            Ok(client.value().clone())
        } else {
            Err(ServerError::UnknownSid)
        }
    }

    fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) {
        // TODO: listen for responder responses on fallible events
        let server_event_sender = self.event_senders.server.clone();
        let clients = self.clients.clone();
        tokio::spawn(async move {
            let mut receiver = client_event_receiver;
            while let Some(message) = receiver.recv().await {
                match message {
                    SocketEvent::Close { socket_id, reason } => {
                        clients.remove(&socket_id);
                        let _ = server_event_sender.send(ServerEvent::Close {
                            connection_id: socket_id,
                            reason,
                        });
                    }
                    SocketEvent::Flush { socket_id } => {
                        // Forward the Flush event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Flush {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Drain { socket_id } => {
                        // Forward the Drain event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Drain {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Message {
                        socket_id,
                        context,
                        data,
                    } => {
                        // Forward the Message event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Message {
                            connection_id: socket_id,
                            context,
                            data,
                        });
                    }
                    SocketEvent::Error { socket_id } => {
                        let _ = server_event_sender.send(ServerEvent::Error {
                            connection_id: socket_id,
                        });
                    }
                    _ => {}
                }
            }
        });
    }

    fn subscribe_to_commands(&self) {
        // TODO: receive packet send requests using a MPSC listener ?
todo!(); } } #[derive(Debug)] pub enum HandshakeData<S> where S: 'static, { Polling, WebSocket { socket: S }, } #[derive(Debug)] pub struct AlreadySubscribedError; impl std::fmt::Display for AlreadySubscribedError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "Already subscribed to this server, cannot subscribe again" ) } } impl std::error::Error for AlreadySubscribedError {}
// TODO: consider sending signals or dropping channels instead of closing them like this?
random_line_split
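The server above hands events to its owner over a bmrng request/response channel, so each ServerEvent can be answered with a Packet. A hedged Go analogue of that pattern, with each request carrying its own reply channel; all names here are illustrative stand-ins, not part of the crate:

// Hedged Go sketch of a request/response channel (the bmrng pattern used
// above): the sender blocks until the receiver answers on the per-request
// reply channel. "event" stands in for ServerEvent, "string" for Packet.
package main

import "fmt"

type request struct {
	event string      // stands in for ServerEvent
	reply chan string // stands in for the Packet response
}

func main() {
	events := make(chan request)
	done := make(chan struct{})

	// The "server" side sends an event and waits for the owner's response.
	go func() {
		r := request{event: "connection", reply: make(chan string)}
		events <- r
		fmt.Println("server got response:", <-r.reply)
		close(done)
	}()

	// The "owner" side receives the event and answers it.
	r := <-events
	r.reply <- "ack:" + r.event
	<-done
}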
server.rs
use crate::adapter::Adapter; use crate::socket::{ subscribe_socket_to_transport_events, Callback, Socket, SocketCloseReason, SocketEvent, }; use crate::transport::{Transport, TransportCreateData, TransportKind}; use crate::util::{HttpMethod, RequestContext, SendPacketError, ServerError, SetCookie}; use dashmap::DashMap; use engine_io_parser::packet::{Packet, PacketData}; use std::sync::Arc; use std::sync::Mutex; use tokio::sync::{broadcast, mpsc}; use uuid::Uuid; pub const BUFFER_CONST: usize = 32; #[derive(Debug, Clone, PartialEq)] pub struct ServerOptions { pub ping_timeout: u32, pub ping_interval: u32, pub upgrade_timeout: u32, pub transports: Vec<TransportKind>, pub allow_upgrades: bool, pub initial_packet: Option<Packet>, // TODO: implement this // pub allow_request: Option<Box<dyn (Fn() -> bool) + Send + 'static>>, pub cookie: Option<CookieOptions>, // TODO: node ws-specific options: // - maxHttpBufferSize // - perMessageDeflate // - httpCompression // -- cors pub buffer_factor: usize, } #[derive(Debug, Clone, PartialEq)] pub struct CookieOptions { pub name: String, pub path: String, pub http_only: bool, } #[derive(Debug, Clone)] pub struct EventSenders { // Event sender to external owner server: bmrng::RequestSender<ServerEvent, Packet>, // server: broadcast::Sender<ServerEvent>, /// Event sender to Socket instances. cloned and passed over client: mpsc::Sender<SocketEvent>, } #[derive(Debug)] pub enum ServerState { Unsubscribed { socket_event_receiver: mpsc::Receiver<SocketEvent>, engine_event_receiver: bmrng::RequestReceiver<ServerEvent, Packet>, }, Subscribed, } pub struct Server<A: 'static + Adapter> { state: Arc<Mutex<ServerState>>, // TODO: don't use a mutex here, instead have an internal socket state clients: Arc<DashMap<String, Arc<Socket<A>>>>, event_senders: EventSenders, // TODO: ping timeout handler EngineIoSocketTimeoutHandler pub options: ServerOptions, } impl Default for ServerOptions { fn default() -> Self { ServerOptions { ping_timeout: 5000, ping_interval: 25000, upgrade_timeout: 10000, transports: vec![TransportKind::WebSocket, TransportKind::Polling], allow_upgrades: true, initial_packet: None, cookie: Some(CookieOptions::default()), // allow_request: None, buffer_factor: 2, } } } impl Default for CookieOptions { fn default() -> Self { CookieOptions { name: "io".to_owned(), path: "/".to_owned(), http_only: true, } } } #[derive(Display, Debug, Clone, PartialEq)] pub enum ServerEvent { /// Socket ID Connection { connection_id: String, }, Close { connection_id: String, reason: SocketCloseReason, }, Flush { connection_id: String, }, Drain { connection_id: String, }, Message { connection_id: String, context: Arc<RequestContext>, data: PacketData, }, Error { connection_id: String, }, } impl<A: 'static + Adapter> Server<A> { pub fn new(options: ServerOptions) -> Self { // To listen events from socket instances let (client_event_sender, client_event_receiver) = mpsc::channel(options.buffer_factor * BUFFER_CONST); // To send events to the owner of this Server instance let (server_event_sender, server_event_receiver) = bmrng::channel(options.buffer_factor * BUFFER_CONST); Server { state: Arc::new(Mutex::new(ServerState::Unsubscribed { socket_event_receiver: client_event_receiver, engine_event_receiver: server_event_receiver, })), clients: Arc::new(DashMap::new()), event_senders: EventSenders { server: server_event_sender, client: client_event_sender, }, options, } } pub fn subscribe(&self) -> bmrng::RequestReceiver<ServerEvent, Packet> { self.try_subscribe() 
.expect("Already subscribed to engine_io_server::Server") } pub fn try_subscribe( &self, ) -> Result<bmrng::RequestReceiver<ServerEvent, Packet>, AlreadySubscribedError> { let mut state = self.state.lock().unwrap(); let old_state = std::mem::replace(&mut *state, ServerState::Subscribed); match old_state { ServerState::Subscribed => Err(AlreadySubscribedError), ServerState::Unsubscribed { socket_event_receiver, engine_event_receiver, } => { // First time calling subscribe, also start listening events from `Socket` instances self.subscribe_to_socket_events(socket_event_receiver); Ok(engine_event_receiver) } } // TODO: handle shutdown properly by receiving a shutdown signal // sending it to socket instances. } pub async fn close(&self) { // TODO: consider sending signals or dropping channels instead of closing them like this? // TODO: or drop the whole thing. The server, the sockets, everything. todo!(); // for socket in self.clients.iter() { // socket.value().close(true); // } } pub async fn close_socket(&self, connection_id: &str) { if let Some((_key, socket)) = self.clients.remove(connection_id) { // TODO: convert this to drop todo!(); // socket.close(true); } } // TODO: consider converting ack callbacks into optional async Results? // `connection_id` is an owned string just because of a Rust compiler issue. pub async fn send_packet_with_ack( &self, connection_id: String, packet: Packet, callback: Option<Callback>, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn send_packet( &self, connection_id: String, packet: Packet, ) -> Result<(), SendPacketError> { match self.clients.get(&connection_id) { Some(client) => Ok(client.send_packet(packet, None).await), None => Err(SendPacketError::UnknownConnectionId), } } pub async fn handle_request( &self, context: RequestContext, body: Option<A::Body>, ) -> Result<A::Response, ServerError> { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); self.verify_request(sid_ref, false, context.transport_kind, context.http_method) .await?; if let Some(sid) = sid { let client = self.get_client_or_error(&sid)?; let response = client.handle_polling_request(context.clone(), body).await?; Ok(response) } else { let (sid, response) = self.handshake(context, HandshakeData::Polling).await?; Ok(response) } } /// Akin to `onWebSocket` from engine.io js // TODO: handle errors, socket closure etc. pub async fn handle_upgrade(&self, context: RequestContext, socket: A::WsHandle) { let context = Arc::new(context); let sid_ref = context.query.get("sid"); let sid = sid_ref.map(|s| s.to_owned()); if let Some(sid) = sid { // TODO: don't panic let client = self.get_client_or_error(&sid).expect("TODO: fix this"); client.maybe_upgrade(context, todo!()); // TODO: implement this! // let client = // TODO: call socket.maybe_upgrade() } else { self.handshake(context, HandshakeData::WebSocket { socket }) .await; todo!(); } } pub async fn
( &self, sid: Option<&String>, upgrade: bool, transport_kind: TransportKind, http_method: HttpMethod, ) -> Result<(), ServerError> { if let Some(sid) = sid { let client = self.clients.get(sid); if let Some(client) = client { let client_transport_kind = client.get_transport_kind(); if !upgrade && Some(transport_kind) != client_transport_kind { return Err(ServerError::BadRequest); } } else { return Err(ServerError::UnknownSid); } } else { if http_method != HttpMethod::Get { return Err(ServerError::BadHandshakeMethod); } // FIXME: fix allow_request calls /*if let Some(validator) = &self.options.allow_request { // FIXME: pass some request parameters to this validator // to make it useful let valid = validator(); if !valid { return Err(ServerError::BadRequest); } }*/ } Ok(()) } /// Generate a new ID for a client. /// Note: This generates IDs in a different format from the original JS /// engine.io implementation, which uses a library called /// [base64id](https://www.npmjs.com/package/base64id) that doesn't seem /// to guarantee uniqueness. pub fn generate_id() -> String { Uuid::new_v4().to_hyphenated().to_string() } /// Returns the new client ID pub async fn handshake( &self, context: Arc<RequestContext>, data: HandshakeData<A::WsHandle>, ) -> Result<(String, A::Response), ServerError> { let sid = Self::generate_id(); let supports_binary = !context.query.contains_key("b64"); let jsonp = !supports_binary && !context.query.contains_key("j"); let context = Arc::new(context.with_set_cookie(SetCookie::from_cookie_options( &self.options.cookie, sid.clone(), ))); let transport_create_data = match data { HandshakeData::Polling => TransportCreateData::Polling { jsonp }, HandshakeData::WebSocket { socket } => TransportCreateData::WebSocket { supports_binary, socket, }, }; let socket = Arc::new(Socket::new( sid.clone(), context.clone(), self.event_senders.client.clone(), transport_create_data, )); self.clients.insert(sid.clone(), socket.clone()); socket.open(&self.options).await; // TODO: send this initial packet in the handshake request response? // so we'd need to return it to the adapter if let Some(initial_message_packet) = self.options.initial_packet.clone() { socket.send_packet(initial_message_packet, None).await; } subscribe_socket_to_transport_events(socket).await; let response = { let client = self.get_client_or_error(&sid)?; match client.get_transport_or_fail()?.as_ref() { Transport::Polling(_) => Ok(client.handle_polling_request(context, None).await?), _ => Err(ServerError::BadRequest), } }; // Emit a "connection" event. This is an internal event that's used by socket_io let _ = self .event_senders .server .clone() .send(ServerEvent::Connection { connection_id: sid.clone(), }); response.map(|response| Ok((sid, response)))? 
    }

    pub fn clients_count(&self) -> usize {
        self.clients.len()
    }

    pub fn get_client_or_error(&self, id: &str) -> Result<Arc<Socket<A>>, ServerError> {
        if let Some(client) = self.clients.get(id) {
            Ok(client.value().clone())
        } else {
            Err(ServerError::UnknownSid)
        }
    }

    fn subscribe_to_socket_events(&self, client_event_receiver: mpsc::Receiver<SocketEvent>) {
        // TODO: listen for responder responses on fallible events
        let server_event_sender = self.event_senders.server.clone();
        let clients = self.clients.clone();
        tokio::spawn(async move {
            let mut receiver = client_event_receiver;
            while let Some(message) = receiver.recv().await {
                match message {
                    SocketEvent::Close { socket_id, reason } => {
                        clients.remove(&socket_id);
                        let _ = server_event_sender.send(ServerEvent::Close {
                            connection_id: socket_id,
                            reason,
                        });
                    }
                    SocketEvent::Flush { socket_id } => {
                        // Forward the Flush event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Flush {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Drain { socket_id } => {
                        // Forward the Drain event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Drain {
                            connection_id: socket_id,
                        });
                    }
                    SocketEvent::Message {
                        socket_id,
                        context,
                        data,
                    } => {
                        // Forward the Message event to the external listener
                        let _ = server_event_sender.send(ServerEvent::Message {
                            connection_id: socket_id,
                            context,
                            data,
                        });
                    }
                    SocketEvent::Error { socket_id } => {
                        let _ = server_event_sender.send(ServerEvent::Error {
                            connection_id: socket_id,
                        });
                    }
                    _ => {}
                }
            }
        });
    }

    fn subscribe_to_commands(&self) {
        // TODO: receive packet send requests using a MPSC listener ?
        todo!();
    }
}

#[derive(Debug)]
pub enum HandshakeData<S>
where
    S: 'static,
{
    Polling,
    WebSocket { socket: S },
}

#[derive(Debug)]
pub struct AlreadySubscribedError;

impl std::fmt::Display for AlreadySubscribedError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Already subscribed to this server, cannot subscribe again"
        )
    }
}

impl std::error::Error for AlreadySubscribedError {}
verify_request
identifier_name
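The masked identifier above is verify_request, whose decision table is easy to lose in the Rust. A hedged transliteration to Go with simplified stand-in types, so the two branches are explicit:

// Hedged sketch of the verify_request logic above: with a session id the
// transport must match the existing socket (unless upgrading); without one,
// only GET may open a handshake. Types here are illustrative stand-ins.
package main

import "errors"

var (
	errBadRequest         = errors.New("bad request")
	errUnknownSid         = errors.New("unknown sid")
	errBadHandshakeMethod = errors.New("bad handshake method")
)

func verifyRequest(knownTransport map[string]string, sid, transport, method string, upgrade bool) error {
	if sid != "" {
		current, ok := knownTransport[sid]
		if !ok {
			return errUnknownSid
		}
		if !upgrade && transport != current {
			return errBadRequest
		}
		return nil
	}
	if method != "GET" {
		return errBadHandshakeMethod
	}
	return nil
}

func main() {
	sockets := map[string]string{"abc": "polling"}
	_ = verifyRequest(sockets, "abc", "websocket", "GET", true) // nil: upgrade allowed
}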
dkg.go
package dkg

import (
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"

	"github.com/MadBase/MadNet/crypto/bn256"
	"github.com/MadBase/MadNet/crypto/bn256/cloudflare"
	"github.com/MadBase/MadNet/logging"
	"github.com/ethereum/go-ethereum/common"
	"github.com/sirupsen/logrus"
)

// Evil
var logger *logrus.Logger = logging.GetLogger("dkg")

// Useful pseudo-constants
var (
	empty2Big     [2]*big.Int
	empty4Big     [4]*big.Int
	h1BaseMessage []byte = []byte("MadHive Rocks!")
)

// Participant contains what we know about other participants, i.e. public information
type Participant struct {
	Address   common.Address
	Index     int
	PublicKey [2]*big.Int
}

// ParticipantList is a required type alias since the Sort interface is awful
type ParticipantList []*Participant

// Simplify logging
func (p *Participant) String() string {
	out, err := json.Marshal(p)
	if err != nil {
		return err.Error()
	}
	return string(out)
}

// Len returns the len of the collection
func (pl ParticipantList) Len() int {
	return len(pl)
}

// Less decides if element i is 'Less' than element j -- less ~= before
func (pl ParticipantList) Less(i, j int) bool {
	return pl[i].Index < pl[j].Index
}

// Swap swaps elements i and j within the collection
func (pl ParticipantList) Swap(i, j int) {
	pl[i], pl[j] = pl[j], pl[i]
}

// ThresholdForUserCount returns the threshold user count and k for successful key generation
func ThresholdForUserCount(n int) (int, int) {
	k := n / 3
	threshold := 2 * k
	if (n - 3*k) == 2 {
		threshold = threshold + 1
	}
	return int(threshold), int(k)
}

// InverseArrayForUserCount pre-calculates an inverse array for use by ethereum contracts
func InverseArrayForUserCount(n int) ([]*big.Int, error) {
	bigNeg2 := big.NewInt(-2)
	orderMinus2 := new(big.Int).Add(cloudflare.Order, bigNeg2)

	// Get inverse array; this array is required to help keep gas costs down
	// in the smart contract. Modular multiplication is much cheaper than
	// modular inversion (exponentiation).
	invArrayBig := make([]*big.Int, n-1)
	for idx := 0; idx < n-1; idx++ {
		m := big.NewInt(int64(idx + 1))
		mInv := new(big.Int).Exp(m, orderMinus2, cloudflare.Order)

		// Confirm
		res := new(big.Int).Mul(m, mInv)
		res.Mod(res, cloudflare.Order)
		if res.Cmp(common.Big1) != 0 {
			return nil, errors.New("Error when computing inverseArray")
		}
		invArrayBig[idx] = mInv
	}
	return invArrayBig, nil
}

// GenerateKeys returns a private key, a public key and potentially an error
func GenerateKeys() (*big.Int, [2]*big.Int, error) {
	privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader)
	publicKey := bn256.G1ToBigIntArray(publicKeyG1)
	return privateKey, publicKey, err
}

// GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error
func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error)
// GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error
func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) {
	h1Base, err := cloudflare.HashToG1(h1BaseMessage)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}
	orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
	h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)

	if firstPrivateCoefficients == nil {
		return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]")
	}

	keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients)
	keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1)

	// KeyShare G2
	h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
	keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
	keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)

	// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
	validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
	if !validPair {
		return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
	}

	// DLEQ Proof
	g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
	g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
	keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	// Verify DLEQ before sending
	err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}

// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
	if len(keyShare1s) != len(keyShare2s) {
		return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
	}

	// Some predefined stuff to setup
	h1Base, err := cloudflare.HashToG1(h1BaseMessage)
	if err != nil {
		return empty4Big, err
	}
	orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
	h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)

	// Generate master public key
	masterPublicKeyG1 := new(cloudflare.G1)
	masterPublicKeyG2 := new(cloudflare.G2)
	n := len(keyShare1s)
	for idx := 0; idx < n; idx++ {
		keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)

		keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
	}
	masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)

	validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
	if !validPair {
		return empty4Big, errors.New("invalid pairing for master public key")
	}

	return masterPublicKey, nil
}

// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
	// setup
	n := len(participants)

	// build portions of group secret key
	publicKeyG1s := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++ {
		publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)
		if err != nil {
			return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err)
		}
		publicKeyG1s[idx] = publicKeyG1
	}

	transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v", err)
	}

	sharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error condensing commitments: %v", err)
	}

	sharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error generating decrypted shares: %v", err)
	}

	// here's the final group secret
	gskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)
	for idx := 0; idx < len(sharedSecrets); idx++ {
		gskj.Add(gskj, sharedSecrets[idx])
	}
	gskj.Mod(gskj, cloudflare.Order)

	// here's the group public
	gpkj := new(cloudflare.G2).ScalarBaseMult(gskj)
	gpkjBig := bn256.G2ToBigIntArray(gpkj)

	// create sig
	sig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error signing message: %v", err)
	}
	sigBig := bn256.G1ToBigIntArray(sig)

	// verify signature
	validSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error verifying signature: %v", err)
	}
	if !validSig {
		return nil, empty4Big, empty2Big, errors.New("not a valid group signature")
	}

	return gskj, gpkjBig, sigBig, nil
}

// VerifyGroupSigners returns whether the participants are valid or potentially an error
func VerifyGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) (bool, error) {
	// setup
	n := len(participants)
	signers := threshold + 1
	if signers != n {
		return false, fmt.Errorf("Number of signers (%v) != threshold + 1 (%v)", n, threshold+1)
	}

	// publishedSignatures, indices and participants must all be the same length
	if len(publishedSignatures) != n {
		return false, fmt.Errorf("len() -> participants:%v publishedSignatures:%v", n, len(publishedSignatures))
	}

	var err error
	indices := make([]int, n)
	publicKeys := make([]*cloudflare.G2, n)
	signatures := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++ {
		participant := participants[idx]

		publicKeys[idx], err = bn256.BigIntArrayToG2(publishedPublicKeys[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert group public key for %v: %v", idx, err)
		}

		signatures[idx], err = bn256.BigIntArrayToG1(publishedSignatures[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert signature for %v: %v", idx, err)
		}

		signatureValid, err := cloudflare.Verify(initialMessage, signatures[idx], publicKeys[idx], cloudflare.HashToG1)
		if err != nil {
			return false, fmt.Errorf("failed to verify signature for %v", idx)
		}
		if !signatureValid {
			logger.Warnf("Signature not valid for %v", participant.Index)
		} else {
			logger.Infof("Signature good for %v", participant.Index)
		}

		indices[idx] = participant.Index + 1
		logger.Infof("Participant: 0x%x Idx: %v Index: %v", participant.Address, idx, participant.Index)
	}

	groupSignature, err := cloudflare.AggregateSignatures(signatures, indices, threshold)
	if err != nil {
		return false, err
	}

	masterPublicKeyG2, err := bn256.BigIntArrayToG2(masterPublicKey)
	if err != nil {
		return false, fmt.Errorf("failed to convert master public key: %v", err)
	}

	validGrpSig, err := cloudflare.Verify(initialMessage, groupSignature, masterPublicKeyG2, cloudflare.HashToG1)
	if err != nil {
		return false, fmt.Errorf("Could not verify group signature: %v", err)
	}
	return validGrpSig, nil
}

// CategorizeGroupSigners returns 0-based indices of honest participants, 0-based indices of dishonest participants or an error
func CategorizeGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) ([]int, []int, error) {
	// useful bit of info
	n := len(participants)
	chunkSize := threshold + 1

	// if we can't meet threshold we can't do much
	if n < chunkSize {
		return []int{}, []int{}, fmt.Errorf("not enough signers (%v) to meet threshold + 1 (%v)", n, chunkSize)
	}

	// len(publishedPublicKeys) must equal len(publishedSignatures) must equal len(participants)
	if n != len(publishedPublicKeys) || n != len(publishedSignatures) {
		return []int{}, []int{}, fmt.Errorf(
			"mismatched public keys (%v), signatures (%v) and participants (%v)",
			len(publishedPublicKeys), len(publishedSignatures), n)
	}

	// Now we chunk arrays and verify chunks separately
	knownGood := make([]bool, n)
	for begin := 0; begin < n; begin += chunkSize {
		end := begin + chunkSize
		if end > n {
			begin -= (end - n)
			end = n
		}
		logger.Infof("Verifying %v <= index < %v", begin, end)

		groupPublicKeys := publishedPublicKeys[begin:end]
		groupSignatures := publishedSignatures[begin:end]
		groupParticipants := participants[begin:end]

		good, err := VerifyGroupSigners(initialMessage, masterPublicKey, groupPublicKeys, groupSignatures, groupParticipants, threshold)
		if err != nil {
			return []int{}, []int{}, fmt.Errorf("failed verifying group signers between %v and %v: %v", begin, end, err)
		}

		// if the chunk verified then we mark each element as good
		if good {
			for idx := begin; idx < end; idx++ {
				knownGood[idx] = true // TODO this should be the participant index not idx
			}
		}
		logger.Infof("VerifyGroupSigners([%v:%v]): %v -> %v", begin, end, knownGood, good)
	}

	// Hopefully everything is good
	allGood := all(knownGood)
	logger.Infof("VerifyGroupSigners(...): %v", allGood)

	indices := make([]int, n)
	for idx, participant := range participants {
		indices[idx] = participant.Index
	}

	if allGood {
		return indices, []int{}, nil
	}
	return []int{}, indices, nil
}

// ------------------------------------
func all(m []bool) bool {
	for _, v := range m {
		if !v {
			return false
		}
	}
	return true
}
{ // create coefficients (private/public) privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold) if err != nil { return nil, nil, nil, err } publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients) // create commitments commitments := make([][2]*big.Int, len(publicCoefficients)) for idx, publicCoefficient := range publicCoefficients { commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient) } // secret shares transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey) if err != nil { return nil, nil, nil, err } // convert public keys into G1 structs publicKeyG1s := []*cloudflare.G1{} for idx := 0; idx < len(participants); idx++ { participant := participants[idx] logger.Infof("participants[%v]: %v", idx, participant) if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil { publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey) if err != nil { return nil, nil, nil, err } publicKeyG1s = append(publicKeyG1s, publicKeyG1) } } // check for missing data if len(publicKeyG1s) != len(participants) { return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants)) } if len(privateCoefficients) != threshold+1 { return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1) } // secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err) } // final encrypted shares encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err) } return encryptedShares, privateCoefficients, commitments, nil }
identifier_body
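A minimal usage sketch wiring the exported helpers above together for n = 4 participants, ignoring network transport. The import path "github.com/MadBase/MadNet/blockchain/dkg" is an assumption, and errors are handled by panicking only to keep the sketch short:

// Hedged usage sketch for the dkg helpers above; the import path is a guess.
package main

import (
	"fmt"
	"math/big"

	"github.com/MadBase/MadNet/blockchain/dkg" // hypothetical path
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	n := 4
	threshold, k := dkg.ThresholdForUserCount(n) // (2, 1) for n = 4

	privs := make([]*big.Int, n)
	participants := make(dkg.ParticipantList, n)
	for i := 0; i < n; i++ {
		priv, pub, err := dkg.GenerateKeys()
		if err != nil {
			panic(err)
		}
		privs[i] = priv
		participants[i] = &dkg.Participant{
			Address:   common.Address{}, // placeholder address
			Index:     i,
			PublicKey: pub,
		}
	}

	// Participant 0 deals encrypted shares to the whole group.
	encryptedShares, privateCoefs, commitments, err :=
		dkg.GenerateShares(privs[0], participants[0].PublicKey, participants, threshold)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(encryptedShares), len(privateCoefs), len(commitments), k)
}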
dkg.go
package dkg

import (
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"

	"github.com/MadBase/MadNet/crypto/bn256"
	"github.com/MadBase/MadNet/crypto/bn256/cloudflare"
	"github.com/MadBase/MadNet/logging"
	"github.com/ethereum/go-ethereum/common"
	"github.com/sirupsen/logrus"
)

// Evil
var logger *logrus.Logger = logging.GetLogger("dkg")

// Useful pseudo-constants
var (
	empty2Big     [2]*big.Int
	empty4Big     [4]*big.Int
	h1BaseMessage []byte = []byte("MadHive Rocks!")
)

// Participant contains what we know about other participants, i.e. public information
type Participant struct {
	Address   common.Address
	Index     int
	PublicKey [2]*big.Int
}

// ParticipantList is a required type alias since the Sort interface is awful
type ParticipantList []*Participant

// Simplify logging
func (p *Participant) String() string {
	out, err := json.Marshal(p)
	if err != nil {
		return err.Error()
	}
	return string(out)
}

// Len returns the len of the collection
func (pl ParticipantList) Len() int {
	return len(pl)
}

// Less decides if element i is 'Less' than element j -- less ~= before
func (pl ParticipantList) Less(i, j int) bool {
	return pl[i].Index < pl[j].Index
}

// Swap swaps elements i and j within the collection
func (pl ParticipantList) Swap(i, j int) {
	pl[i], pl[j] = pl[j], pl[i]
}

// ThresholdForUserCount returns the threshold user count and k for successful key generation
func ThresholdForUserCount(n int) (int, int) {
	k := n / 3
	threshold := 2 * k
	if (n - 3*k) == 2 {
		threshold = threshold + 1
	}
	return int(threshold), int(k)
}

// InverseArrayForUserCount pre-calculates an inverse array for use by ethereum contracts
func InverseArrayForUserCount(n int) ([]*big.Int, error) {
	bigNeg2 := big.NewInt(-2)
	orderMinus2 := new(big.Int).Add(cloudflare.Order, bigNeg2)

	// Get inverse array; this array is required to help keep gas costs down
	// in the smart contract. Modular multiplication is much cheaper than
	// modular inversion (exponentiation).
invArrayBig := make([]*big.Int, n-1) for idx := 0; idx < n-1; idx++ { m := big.NewInt(int64(idx + 1)) mInv := new(big.Int).Exp(m, orderMinus2, cloudflare.Order) // Confirm res := new(big.Int).Mul(m, mInv) res.Mod(res, cloudflare.Order) if res.Cmp(common.Big1) != 0 { return nil, errors.New("Error when computing inverseArray") } invArrayBig[idx] = mInv } return invArrayBig, nil } // GenerateKeys returns a private key, a public key and potentially an error func GenerateKeys() (*big.Int, [2]*big.Int, error) { privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader) publicKey := bn256.G1ToBigIntArray(publicKeyG1) return privateKey, publicKey, err } // GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) { // create coefficients (private/public) privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold) if err != nil { return nil, nil, nil, err } publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients) // create commitments commitments := make([][2]*big.Int, len(publicCoefficients)) for idx, publicCoefficient := range publicCoefficients { commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient) } // secret shares transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey) if err != nil { return nil, nil, nil, err } // convert public keys into G1 structs publicKeyG1s := []*cloudflare.G1{} for idx := 0; idx < len(participants); idx++ { participant := participants[idx] logger.Infof("participants[%v]: %v", idx, participant) if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil { publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey) if err != nil { return nil, nil, nil, err } publicKeyG1s = append(publicKeyG1s, publicKeyG1) } } // check for missing data if len(publicKeyG1s) != len(participants) { return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants)) } if len(privateCoefficients) != threshold+1 { return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1) } // secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err) } // final encrypted shares encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err) } return encryptedShares, privateCoefficients, commitments, nil } // GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) { h1Base, err := cloudflare.HashToG1(h1BaseMessage) if err != nil { return empty2Big, empty2Big, empty4Big, err } orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10) h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1) if firstPrivateCoefficients == nil { return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]") } keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients) keyShareG1Big 
:= bn256.G1ToBigIntArray(keyShareG1)

	// KeyShare G2
	h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
	keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
	keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)

	// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
	validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
	if !validPair {
		return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
	}

	// DLEQ Proof
	g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
	g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
	keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	// Verify DLEQ before sending
	err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}

// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
	if len(keyShare1s) != len(keyShare2s) {
		return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
	}

	// Some predefined stuff to setup
	h1Base, err := cloudflare.HashToG1(h1BaseMessage)
	if err != nil {
		return empty4Big, err
	}
	orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
	h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)

	// Generate master public key
	masterPublicKeyG1 := new(cloudflare.G1)
	masterPublicKeyG2 := new(cloudflare.G2)
	n := len(keyShare1s)
	for idx := 0; idx < n; idx++ {
		keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)

		keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
	}
	masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)

	validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
	if !validPair {
		return empty4Big, errors.New("invalid pairing for master public key")
	}

	return masterPublicKey, nil
}

// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
	// setup
	n := len(participants)

	// build portions of group secret key
	publicKeyG1s := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++
	transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v", err)
	}

	sharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error condensing commitments: %v", err)
	}

	sharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error generating decrypted shares: %v", err)
	}

	// here's the final group secret
	gskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)
	for idx := 0; idx < len(sharedSecrets); idx++ {
		gskj.Add(gskj, sharedSecrets[idx])
	}
	gskj.Mod(gskj, cloudflare.Order)

	// here's the group public
	gpkj := new(cloudflare.G2).ScalarBaseMult(gskj)
	gpkjBig := bn256.G2ToBigIntArray(gpkj)

	// create sig
	sig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error signing message: %v", err)
	}
	sigBig := bn256.G1ToBigIntArray(sig)

	// verify signature
	validSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error verifying signature: %v", err)
	}
	if !validSig {
		return nil, empty4Big, empty2Big, errors.New("not a valid group signature")
	}

	return gskj, gpkjBig, sigBig, nil
}

// VerifyGroupSigners returns whether the participants are valid or potentially an error
func VerifyGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) (bool, error) {
	// setup
	n := len(participants)
	signers := threshold + 1
	if signers != n {
		return false, fmt.Errorf("Number of signers (%v) != threshold + 1 (%v)", n, threshold+1)
	}

	// publishedSignatures, indices and participants must all be the same length
	if len(publishedSignatures) != n {
		return false, fmt.Errorf("len() -> participants:%v publishedSignatures:%v", n, len(publishedSignatures))
	}

	var err error
	indices := make([]int, n)
	publicKeys := make([]*cloudflare.G2, n)
	signatures := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++ {
		participant := participants[idx]

		publicKeys[idx], err = bn256.BigIntArrayToG2(publishedPublicKeys[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert group public key for %v: %v", idx, err)
		}

		signatures[idx], err = bn256.BigIntArrayToG1(publishedSignatures[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert signature for %v: %v", idx, err)
		}

		signatureValid, err := cloudflare.Verify(initialMessage, signatures[idx], publicKeys[idx], cloudflare.HashToG1)
		if err != nil {
			return false, fmt.Errorf("failed to verify signature for %v", idx)
		}
		if !signatureValid {
			logger.Warnf("Signature not valid for %v", participant.Index)
		} else {
			logger.Infof("Signature good for %v", participant.Index)
		}

		indices[idx] = participant.Index + 1
		logger.Infof("Participant: 0x%x Idx: %v Index: %v", participant.Address, idx, participant.Index)
	}

	groupSignature, err := cloudflare.AggregateSignatures(signatures, indices, threshold)
	if err != nil {
		return false, err
	}

	masterPublicKeyG2, err := bn256.BigIntArrayToG2(masterPublicKey)
	if err != nil {
		return false, fmt.Errorf("failed to convert master public key: %v", err)
	}

	validGrpSig, err := cloudflare.Verify(initialMessage, groupSignature, masterPublicKeyG2, cloudflare.HashToG1)
	if err != nil {
		return false, fmt.Errorf("Could not verify group signature: %v", err)
	}
	return validGrpSig, nil
}

// CategorizeGroupSigners returns 0-based indices of honest participants, 0-based indices of dishonest participants or an error
func CategorizeGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) ([]int, []int, error) {
	// useful bit of info
	n := len(participants)
	chunkSize := threshold + 1

	// if we can't meet threshold we can't do much
	if n < chunkSize {
		return []int{}, []int{}, fmt.Errorf("not enough signers (%v) to meet threshold + 1 (%v)", n, chunkSize)
	}

	// len(publishedPublicKeys) must equal len(publishedSignatures) must equal len(participants)
	if n != len(publishedPublicKeys) || n != len(publishedSignatures) {
		return []int{}, []int{}, fmt.Errorf(
			"mismatched public keys (%v), signatures (%v) and participants (%v)",
			len(publishedPublicKeys), len(publishedSignatures), n)
	}

	// Now we chunk arrays and verify chunks separately
	knownGood := make([]bool, n)
	for begin := 0; begin < n; begin += chunkSize {
		end := begin + chunkSize
		if end > n {
			begin -= (end - n)
			end = n
		}
		logger.Infof("Verifying %v <= index < %v", begin, end)

		groupPublicKeys := publishedPublicKeys[begin:end]
		groupSignatures := publishedSignatures[begin:end]
		groupParticipants := participants[begin:end]

		good, err := VerifyGroupSigners(initialMessage, masterPublicKey, groupPublicKeys, groupSignatures, groupParticipants, threshold)
		if err != nil {
			return []int{}, []int{}, fmt.Errorf("failed verifying group signers between %v and %v: %v", begin, end, err)
		}

		// if the chunk verified then we mark each element as good
		if good {
			for idx := begin; idx < end; idx++ {
				knownGood[idx] = true // TODO this should be the participant index not idx
			}
		}
		logger.Infof("VerifyGroupSigners([%v:%v]): %v -> %v", begin, end, knownGood, good)
	}

	// Hopefully everything is good
	allGood := all(knownGood)
	logger.Infof("VerifyGroupSigners(...): %v", allGood)

	indices := make([]int, n)
	for idx, participant := range participants {
		indices[idx] = participant.Index
	}

	if allGood {
		return indices, []int{}, nil
	}
	return []int{}, indices, nil
}

// ------------------------------------
func all(m []bool) bool {
	for _, v := range m {
		if !v {
			return false
		}
	}
	return true
}
{ publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err) } publicKeyG1s[idx] = publicKeyG1 }
conditional_block
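A hedged sketch of the aggregation step in the code above: every participant publishes a key share derived from privateCoefficients[0], and anyone can fold the published shares into the master public key. Only the signatures shown in the source are used; the import path is an assumption, and in a real run the secrets never leave their owners:

// Hedged sketch: derive each participant's key shares and combine them.
package main

import (
	"math/big"

	"github.com/MadBase/MadNet/blockchain/dkg" // hypothetical path
)

// aggregate takes each participant's first private coefficient (here
// collected in one place purely for illustration), derives the published
// G1/G2 key shares via GenerateKeyShare, and combines them with
// GenerateMasterPublicKey as in the source above.
func aggregate(firstCoefs []*big.Int) ([4]*big.Int, error) {
	keyShare1s := make([][2]*big.Int, len(firstCoefs))
	keyShare2s := make([][4]*big.Int, len(firstCoefs))
	for i, coef := range firstCoefs {
		g1Share, _, g2Share, err := dkg.GenerateKeyShare(coef) // proof dropped here
		if err != nil {
			return [4]*big.Int{}, err
		}
		keyShare1s[i] = g1Share
		keyShare2s[i] = g2Share
	}
	return dkg.GenerateMasterPublicKey(keyShare1s, keyShare2s)
}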
dkg.go
package dkg

import (
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"math/big"

	"github.com/MadBase/MadNet/crypto/bn256"
	"github.com/MadBase/MadNet/crypto/bn256/cloudflare"
	"github.com/MadBase/MadNet/logging"
	"github.com/ethereum/go-ethereum/common"
	"github.com/sirupsen/logrus"
)

// Evil
var logger *logrus.Logger = logging.GetLogger("dkg")

// Useful pseudo-constants
var (
	empty2Big     [2]*big.Int
	empty4Big     [4]*big.Int
	h1BaseMessage []byte = []byte("MadHive Rocks!")
)

// Participant contains what we know about other participants, i.e. public information
type Participant struct {
	Address   common.Address
	Index     int
	PublicKey [2]*big.Int
}

// ParticipantList is a required type alias since the Sort interface is awful
type ParticipantList []*Participant

// Simplify logging
func (p *Participant) String() string {
	out, err := json.Marshal(p)
	if err != nil {
		return err.Error()
	}
	return string(out)
}

// Len returns the len of the collection
func (pl ParticipantList) Len() int {
	return len(pl)
}

// Less decides if element i is 'Less' than element j -- less ~= before
func (pl ParticipantList) Less(i, j int) bool {
	return pl[i].Index < pl[j].Index
}

// Swap swaps elements i and j within the collection
func (pl ParticipantList) Swap(i, j int) {
	pl[i], pl[j] = pl[j], pl[i]
}

// ThresholdForUserCount returns the threshold user count and k for successful key generation
func ThresholdForUserCount(n int) (int, int) {
	k := n / 3
	threshold := 2 * k
	if (n - 3*k) == 2 {
		threshold = threshold + 1
	}
	return int(threshold), int(k)
}

// InverseArrayForUserCount pre-calculates an inverse array for use by ethereum contracts
func InverseArrayForUserCount(n int) ([]*big.Int, error) {
	bigNeg2 := big.NewInt(-2)
	orderMinus2 := new(big.Int).Add(cloudflare.Order, bigNeg2)

	// Get inverse array; this array is required to help keep gas costs down
	// in the smart contract. Modular multiplication is much cheaper than
	// modular inversion (exponentiation).
invArrayBig := make([]*big.Int, n-1) for idx := 0; idx < n-1; idx++ { m := big.NewInt(int64(idx + 1)) mInv := new(big.Int).Exp(m, orderMinus2, cloudflare.Order) // Confirm res := new(big.Int).Mul(m, mInv) res.Mod(res, cloudflare.Order) if res.Cmp(common.Big1) != 0 { return nil, errors.New("Error when computing inverseArray") } invArrayBig[idx] = mInv } return invArrayBig, nil } // GenerateKeys returns a private key, a public key and potentially an error func GenerateKeys() (*big.Int, [2]*big.Int, error) { privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader) publicKey := bn256.G1ToBigIntArray(publicKeyG1) return privateKey, publicKey, err } // GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) { // create coefficients (private/public) privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold) if err != nil { return nil, nil, nil, err } publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients) // create commitments commitments := make([][2]*big.Int, len(publicCoefficients)) for idx, publicCoefficient := range publicCoefficients { commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient) } // secret shares transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey) if err != nil { return nil, nil, nil, err } // convert public keys into G1 structs publicKeyG1s := []*cloudflare.G1{} for idx := 0; idx < len(participants); idx++ { participant := participants[idx] logger.Infof("participants[%v]: %v", idx, participant) if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil { publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey) if err != nil { return nil, nil, nil, err } publicKeyG1s = append(publicKeyG1s, publicKeyG1) } } // check for missing data if len(publicKeyG1s) != len(participants) { return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants)) } if len(privateCoefficients) != threshold+1 { return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1) } // secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err) } // final encrypted shares encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err) } return encryptedShares, privateCoefficients, commitments, nil } // GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) { h1Base, err := cloudflare.HashToG1(h1BaseMessage) if err != nil { return empty2Big, empty2Big, empty4Big, err } orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10) h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1) if firstPrivateCoefficients == nil { return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]") } keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients) keyShareG1Big 
:= bn256.G1ToBigIntArray(keyShareG1)

	// KeyShare G2
	h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1)
	keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients)
	keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2)

	// PairingCheck to ensure keyShareG1 and keyShareG2 form valid pair
	validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2})
	if !validPair {
		return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair")
	}

	// DLEQ Proof
	g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1)
	g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients)
	keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	// Verify DLEQ before sending
	err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof)
	if err != nil {
		return empty2Big, empty2Big, empty4Big, err
	}

	return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil
}

// GenerateMasterPublicKey returns the master public key
func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) {
	if len(keyShare1s) != len(keyShare2s) {
		return empty4Big, errors.New("len(keyShare1s) != len(keyshare2s)")
	}

	// Some predefined stuff to setup
	h1Base, err := cloudflare.HashToG1(h1BaseMessage)
	if err != nil {
		return empty4Big, err
	}
	orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10)
	h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1)

	// Generate master public key
	masterPublicKeyG1 := new(cloudflare.G1)
	masterPublicKeyG2 := new(cloudflare.G2)
	n := len(keyShare1s)
	for idx := 0; idx < n; idx++ {
		keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1)

		keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx])
		if err != nil {
			return empty4Big, err
		}
		masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2)
	}
	masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2)

	validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2})
	if !validPair {
		return empty4Big, errors.New("invalid pairing for master public key")
	}

	return masterPublicKey, nil
}

// GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error
func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) {
	// setup
	n := len(participants)

	// build portions of group secret key
	publicKeyG1s := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++ {
		publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey)
		if err != nil {
			return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err)
		}
		publicKeyG1s[idx] = publicKeyG1
	}
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v", err)
	}

	sharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error condensing commitments: %v", err)
	}

	sharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error generating decrypted shares: %v", err)
	}

	// here's the final group secret
	gskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index)
	for idx := 0; idx < len(sharedSecrets); idx++ {
		gskj.Add(gskj, sharedSecrets[idx])
	}
	gskj.Mod(gskj, cloudflare.Order)

	// here's the group public
	gpkj := new(cloudflare.G2).ScalarBaseMult(gskj)
	gpkjBig := bn256.G2ToBigIntArray(gpkj)

	// create sig
	sig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error signing message: %v", err)
	}
	sigBig := bn256.G1ToBigIntArray(sig)

	// verify signature
	validSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1)
	if err != nil {
		return nil, empty4Big, empty2Big, fmt.Errorf("error verifying signature: %v", err)
	}
	if !validSig {
		return nil, empty4Big, empty2Big, errors.New("not a valid group signature")
	}

	return gskj, gpkjBig, sigBig, nil
}

// VerifyGroupSigners returns whether the participants are valid or potentially an error
func VerifyGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) (bool, error) {
	// setup
	n := len(participants)
	signers := threshold + 1
	if signers != n {
		return false, fmt.Errorf("Number of signers (%v) != threshold + 1 (%v)", n, threshold+1)
	}

	// publishedSignatures, indices and participants must all be the same length
	if len(publishedSignatures) != n {
		return false, fmt.Errorf("len() -> participants:%v publishedSignatures:%v", n, len(publishedSignatures))
	}

	var err error
	indices := make([]int, n)
	publicKeys := make([]*cloudflare.G2, n)
	signatures := make([]*cloudflare.G1, n)
	for idx := 0; idx < n; idx++ {
		participant := participants[idx]

		publicKeys[idx], err = bn256.BigIntArrayToG2(publishedPublicKeys[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert group public key for %v: %v", idx, err)
		}

		signatures[idx], err = bn256.BigIntArrayToG1(publishedSignatures[idx])
		if err != nil {
			return false, fmt.Errorf("failed to convert signature for %v: %v", idx, err)
		}

		signatureValid, err := cloudflare.Verify(initialMessage, signatures[idx], publicKeys[idx], cloudflare.HashToG1)
		if err != nil {
			return false, fmt.Errorf("failed to verify signature for %v", idx)
		}
		if !signatureValid {
			logger.Warnf("Signature not valid for %v", participant.Index)
		} else {
			logger.Infof("Signature good for %v", participant.Index)
		}

		indices[idx] = participant.Index + 1
		logger.Infof("Participant: 0x%x Idx: %v Index: %v", participant.Address, idx, participant.Index)
	}

	groupSignature, err := cloudflare.AggregateSignatures(signatures, indices, threshold)
	if err != nil {
		return false, err
	}

	masterPublicKeyG2, err := bn256.BigIntArrayToG2(masterPublicKey)
	if err != nil {
		return false, fmt.Errorf("failed to convert master public key: %v", err)
	}

	validGrpSig, err := cloudflare.Verify(initialMessage, groupSignature, masterPublicKeyG2, cloudflare.HashToG1)
	if err != nil {
		return false, fmt.Errorf("Could not verify group signature: %v", err)
	}
	return validGrpSig, nil
}

// CategorizeGroupSigners returns 0-based indices of honest participants, 0-based indices of dishonest participants or an error
func CategorizeGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) ([]int, []int, error) {
	// useful bit of info
	n := len(participants)
	chunkSize := threshold + 1

	// if we can't meet threshold we can't do much
	if n < chunkSize {
		return []int{}, []int{}, fmt.Errorf("not enough signers (%v) to meet threshold + 1 (%v)", n, chunkSize)
	}

	// len(publishedPublicKeys) must equal len(publishedSignatures) must equal len(participants)
	if n != len(publishedPublicKeys) || n != len(publishedSignatures) {
		return []int{}, []int{}, fmt.Errorf(
			"mismatched public keys (%v), signatures (%v) and participants (%v)",
			len(publishedPublicKeys), len(publishedSignatures), n)
	}

	// Now we chunk arrays and verify chunks separately
	knownGood := make([]bool, n)
	for begin := 0; begin < n; begin += chunkSize {
		end := begin + chunkSize
		if end > n {
			begin -= (end - n)
			end = n
		}
		logger.Infof("Verifying %v <= index < %v", begin, end)

		groupPublicKeys := publishedPublicKeys[begin:end]
		groupSignatures := publishedSignatures[begin:end]
		groupParticipants := participants[begin:end]

		good, err := VerifyGroupSigners(initialMessage, masterPublicKey, groupPublicKeys, groupSignatures, groupParticipants, threshold)
		if err != nil {
			return []int{}, []int{}, fmt.Errorf("failed verifying group signers between %v and %v: %v", begin, end, err)
		}

		// if the chunk verified then we mark each element as good
		if good {
			for idx := begin; idx < end; idx++ {
				knownGood[idx] = true // TODO this should be the participant index not idx
			}
		}
		logger.Infof("VerifyGroupSigners([%v:%v]): %v -> %v", begin, end, knownGood, good)
	}

	// Hopefully everything is good
	allGood := all(knownGood)
	logger.Infof("VerifyGroupSigners(...): %v", allGood)

	indices := make([]int, n)
	for idx, participant := range participants {
		indices[idx] = participant.Index
	}

	if allGood {
		return indices, []int{}, nil
	}
	return []int{}, indices, nil
}

// ------------------------------------
func all(m []bool) bool {
	for _, v := range m {
		if !v {
			return false
		}
	}
	return true
}
transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey)
random_line_split
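A note on the windowing used by CategorizeGroupSigners in the sample above: it slides a window of chunkSize = threshold + 1 over the n participants and, when the last window would overrun, shifts its start back so it still ends at n (so the final window can overlap the previous one). Below is a minimal, self-contained sketch of just that bookkeeping; the helper name chunkWindows is ours, not part of the source.

package main

import "fmt"

// chunkWindows reproduces the begin/end bookkeeping used by
// CategorizeGroupSigners: fixed-size windows of chunkSize, with the
// last window shifted left so it never runs past n.
func chunkWindows(n, chunkSize int) [][2]int {
	var windows [][2]int
	for begin := 0; begin < n; begin += chunkSize {
		end := begin + chunkSize
		if end > n {
			begin -= end - n // shift the final window back
			end = n
		}
		windows = append(windows, [2]int{begin, end})
	}
	return windows
}

func main() {
	// n=10 signers, threshold=3, so chunkSize=4:
	// expect [0,4) [4,8) [6,10); the last window overlaps.
	fmt.Println(chunkWindows(10, 4))
}

With n = 10 and chunkSize = 4 this prints [[0 4] [4 8] [6 10]], matching the overlap produced by the begin -= (end - n) adjustment in the source.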
dkg.go
package dkg import ( "crypto/rand" "encoding/json" "errors" "fmt" "math/big" "github.com/MadBase/MadNet/crypto/bn256" "github.com/MadBase/MadNet/crypto/bn256/cloudflare" "github.com/MadBase/MadNet/logging" "github.com/ethereum/go-ethereum/common" "github.com/sirupsen/logrus" ) // Evil var logger *logrus.Logger = logging.GetLogger("dkg") // Useful pseudo-constants var ( empty2Big [2]*big.Int empty4Big [4]*big.Int h1BaseMessage []byte = []byte("MadHive Rocks!") ) // Participant contains what we know about other participants, i.e. public information type Participant struct { Address common.Address Index int PublicKey [2]*big.Int } // ParticipantList is a required type alias since the Sort interface is awful type ParticipantList []*Participant // Simplify logging func (p *Participant) String() string { out, err := json.Marshal(p) if err != nil { return err.Error() } return string(out) } // Len returns the len of the collection func (pl ParticipantList) Len() int { return len(pl) } // Less decides if element i is 'Less' than element j -- less ~= before func (pl ParticipantList) Less(i, j int) bool { return pl[i].Index < pl[j].Index } // Swap swaps elements i and j within the collection func (pl ParticipantList)
(i, j int) { pl[i], pl[j] = pl[j], pl[i] } // ThresholdForUserCount returns the threshold and k for successful key generation with n users func ThresholdForUserCount(n int) (int, int) { k := n / 3 threshold := 2 * k if (n - 3*k) == 2 { threshold = threshold + 1 } return threshold, k } // InverseArrayForUserCount pre-calculates an inverse array for use by ethereum contracts func InverseArrayForUserCount(n int) ([]*big.Int, error) { bigNeg2 := big.NewInt(-2) orderMinus2 := new(big.Int).Add(cloudflare.Order, bigNeg2) // Get inverse array; this array is required to help keep gas costs down // in the smart contract. Modular multiplication is much cheaper than // modular inversion (exponentiation). invArrayBig := make([]*big.Int, n-1) for idx := 0; idx < n-1; idx++ { m := big.NewInt(int64(idx + 1)) mInv := new(big.Int).Exp(m, orderMinus2, cloudflare.Order) // Confirm res := new(big.Int).Mul(m, mInv) res.Mod(res, cloudflare.Order) if res.Cmp(common.Big1) != 0 { return nil, errors.New("Error when computing inverseArray") } invArrayBig[idx] = mInv } return invArrayBig, nil } // GenerateKeys returns a private key, a public key and potentially an error func GenerateKeys() (*big.Int, [2]*big.Int, error) { privateKey, publicKeyG1, err := cloudflare.RandomG1(rand.Reader) publicKey := bn256.G1ToBigIntArray(publicKeyG1) return privateKey, publicKey, err } // GenerateShares returns encrypted shares, private coefficients, commitments and potentially an error func GenerateShares(transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, participants ParticipantList, threshold int) ([]*big.Int, []*big.Int, [][2]*big.Int, error) { // create coefficients (private/public) privateCoefficients, err := cloudflare.ConstructPrivatePolyCoefs(rand.Reader, threshold) if err != nil { return nil, nil, nil, err } publicCoefficients := cloudflare.GeneratePublicCoefs(privateCoefficients) // create commitments commitments := make([][2]*big.Int, len(publicCoefficients)) for idx, publicCoefficient := range publicCoefficients { commitments[idx] = bn256.G1ToBigIntArray(publicCoefficient) } // secret shares transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey) if err != nil { return nil, nil, nil, err } // convert public keys into G1 structs publicKeyG1s := []*cloudflare.G1{} for idx := 0; idx < len(participants); idx++ { participant := participants[idx] logger.Infof("participants[%v]: %v", idx, participant) if participant != nil && participant.PublicKey[0] != nil && participant.PublicKey[1] != nil { publicKeyG1, err := bn256.BigIntArrayToG1(participant.PublicKey) if err != nil { return nil, nil, nil, err } publicKeyG1s = append(publicKeyG1s, publicKeyG1) } } // check for missing data if len(publicKeyG1s) != len(participants) { return nil, nil, nil, fmt.Errorf("only have %v of %v public keys", len(publicKeyG1s), len(participants)) } if len(privateCoefficients) != threshold+1 { return nil, nil, nil, fmt.Errorf("only have %v of %v private coefficients", len(privateCoefficients), threshold+1) } // create secret shares secretsArray, err := cloudflare.GenerateSecretShares(transportPublicKeyG1, privateCoefficients, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate secret shares: %v", err) } // final encrypted shares encryptedShares, err := cloudflare.GenerateEncryptedShares(secretsArray, transportPrivateKey, publicKeyG1s) if err != nil { return nil, nil, nil, fmt.Errorf("failed to generate encrypted shares: %v", err) } return encryptedShares, privateCoefficients, commitments, nil } // 
GenerateKeyShare returns G1 key share, G1 proof, G2 key share and potentially an error func GenerateKeyShare(firstPrivateCoefficients *big.Int) ([2]*big.Int, [2]*big.Int, [4]*big.Int, error) { h1Base, err := cloudflare.HashToG1(h1BaseMessage) if err != nil { return empty2Big, empty2Big, empty4Big, err } orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10) h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1) if firstPrivateCoefficients == nil { return empty2Big, empty2Big, empty4Big, errors.New("Missing secret value, aka private coefficient[0]") } keyShareG1 := new(cloudflare.G1).ScalarMult(h1Base, firstPrivateCoefficients) keyShareG1Big := bn256.G1ToBigIntArray(keyShareG1) // KeyShare G2 h2Base := new(cloudflare.G2).ScalarBaseMult(common.Big1) keyShareG2 := new(cloudflare.G2).ScalarMult(h2Base, firstPrivateCoefficients) keyShareG2Big := bn256.G2ToBigIntArray(keyShareG2) // PairingCheck to ensure keyShareG1 and keyShareG2 form a valid pair validPair := cloudflare.PairingCheck([]*cloudflare.G1{keyShareG1, h1Base}, []*cloudflare.G2{h2Neg, keyShareG2}) if !validPair { return empty2Big, empty2Big, empty4Big, errors.New("key shares not a valid pair") } // DLEQ Proof g1Base := new(cloudflare.G1).ScalarBaseMult(common.Big1) g1Value := new(cloudflare.G1).ScalarBaseMult(firstPrivateCoefficients) keyShareDLEQProof, err := cloudflare.GenerateDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, firstPrivateCoefficients, rand.Reader) if err != nil { return empty2Big, empty2Big, empty4Big, err } // Verify DLEQ before sending err = cloudflare.VerifyDLEQProofG1(h1Base, keyShareG1, g1Base, g1Value, keyShareDLEQProof) if err != nil { return empty2Big, empty2Big, empty4Big, err } return keyShareG1Big, keyShareDLEQProof, keyShareG2Big, nil } // GenerateMasterPublicKey returns the master public key func GenerateMasterPublicKey(keyShare1s [][2]*big.Int, keyShare2s [][4]*big.Int) ([4]*big.Int, error) { if len(keyShare1s) != len(keyShare2s) { return empty4Big, errors.New("len(keyShare1s) != len(keyShare2s)") } // Predefined values for the pairing check h1Base, err := cloudflare.HashToG1(h1BaseMessage) if err != nil { return empty4Big, err } orderMinus1, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495616", 10) h2Neg := new(cloudflare.G2).ScalarBaseMult(orderMinus1) // Generate master public key masterPublicKeyG1 := new(cloudflare.G1) masterPublicKeyG2 := new(cloudflare.G2) n := len(keyShare1s) for idx := 0; idx < n; idx++ { keySharedG1, err := bn256.BigIntArrayToG1(keyShare1s[idx]) if err != nil { return empty4Big, err } masterPublicKeyG1.Add(masterPublicKeyG1, keySharedG1) keySharedG2, err := bn256.BigIntArrayToG2(keyShare2s[idx]) if err != nil { return empty4Big, err } masterPublicKeyG2.Add(masterPublicKeyG2, keySharedG2) } masterPublicKey := bn256.G2ToBigIntArray(masterPublicKeyG2) validPair := cloudflare.PairingCheck([]*cloudflare.G1{masterPublicKeyG1, h1Base}, []*cloudflare.G2{h2Neg, masterPublicKeyG2}) if !validPair { return empty4Big, errors.New("invalid pairing for master public key") } return masterPublicKey, nil } // GenerateGroupKeys returns the group private key, group public key, a signature and potentially an error func GenerateGroupKeys(initialMessage []byte, transportPrivateKey *big.Int, transportPublicKey [2]*big.Int, privateCoefficients []*big.Int, encryptedShares [][]*big.Int, index int, participants ParticipantList, threshold int) (*big.Int, [4]*big.Int, [2]*big.Int, error) { // 
setup n := len(participants) // build portions of group secret key publicKeyG1s := make([]*cloudflare.G1, n) for idx := 0; idx < n; idx++ { publicKeyG1, err := bn256.BigIntArrayToG1(participants[idx].PublicKey) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error converting public key to g1: %v", err) } publicKeyG1s[idx] = publicKeyG1 } transportPublicKeyG1, err := bn256.BigIntArrayToG1(transportPublicKey) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error converting transport public key to g1: %v", err) } sharedEncrypted, err := cloudflare.CondenseCommitments(transportPublicKeyG1, encryptedShares, publicKeyG1s) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error condensing commitments: %v", err) } sharedSecrets, err := cloudflare.GenerateDecryptedShares(transportPrivateKey, sharedEncrypted, publicKeyG1s) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error generating decrypted shares: %v", err) } // here's the final group secret gskj := cloudflare.PrivatePolyEval(privateCoefficients, 1+index) for idx := 0; idx < len(sharedSecrets); idx++ { gskj.Add(gskj, sharedSecrets[idx]) } gskj.Mod(gskj, cloudflare.Order) // here's the group public key gpkj := new(cloudflare.G2).ScalarBaseMult(gskj) gpkjBig := bn256.G2ToBigIntArray(gpkj) // create sig sig, err := cloudflare.Sign(initialMessage, gskj, cloudflare.HashToG1) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error signing message: %v", err) } sigBig := bn256.G1ToBigIntArray(sig) // verify signature validSig, err := cloudflare.Verify(initialMessage, sig, gpkj, cloudflare.HashToG1) if err != nil { return nil, empty4Big, empty2Big, fmt.Errorf("error verifying signature: %v", err) } if !validSig { return nil, empty4Big, empty2Big, errors.New("not a valid group signature") } return gskj, gpkjBig, sigBig, nil } // VerifyGroupSigners returns whether the participants form a valid signing group, or an error func VerifyGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) (bool, error) { // setup n := len(participants) signers := threshold + 1 if signers != n { return false, fmt.Errorf("Number of signers (%v) != threshold + 1 (%v)", n, threshold+1) } // publishedSignatures, indices and participants must all be the same length if len(publishedSignatures) != n { return false, fmt.Errorf("len() -> participants:%v publishedSignatures:%v", n, len(publishedSignatures)) } var err error indices := make([]int, n) publicKeys := make([]*cloudflare.G2, n) signatures := make([]*cloudflare.G1, n) for idx := 0; idx < n; idx++ { participant := participants[idx] publicKeys[idx], err = bn256.BigIntArrayToG2(publishedPublicKeys[idx]) if err != nil { return false, fmt.Errorf("failed to convert group public key for %v: %v", idx, err) } signatures[idx], err = bn256.BigIntArrayToG1(publishedSignatures[idx]) if err != nil { return false, fmt.Errorf("failed to convert signature for %v: %v", idx, err) } signatureValid, err := cloudflare.Verify(initialMessage, signatures[idx], publicKeys[idx], cloudflare.HashToG1) if err != nil { return false, fmt.Errorf("failed to verify signature for %v: %v", idx, err) } if !signatureValid { logger.Warnf("Signature not valid for %v", participant.Index) } else { logger.Infof("Signature good for %v", participant.Index) } indices[idx] = participant.Index + 1 logger.Infof("Participant: 0x%x Idx: %v Index: %v", participant.Address, idx, participant.Index) } 
groupSignature, err := cloudflare.AggregateSignatures(signatures, indices, threshold) if err != nil { return false, err } masterPublicKeyG2, err := bn256.BigIntArrayToG2(masterPublicKey) if err != nil { return false, fmt.Errorf("failed to convert master public key: %v", err) } validGrpSig, err := cloudflare.Verify(initialMessage, groupSignature, masterPublicKeyG2, cloudflare.HashToG1) if err != nil { return false, fmt.Errorf("Could not verify group signature: %v", err) } return validGrpSig, nil } // CategorizeGroupSigners returns the 0-based indices of honest participants and the 0-based indices of dishonest participants, or an error func CategorizeGroupSigners(initialMessage []byte, masterPublicKey [4]*big.Int, publishedPublicKeys [][4]*big.Int, publishedSignatures [][2]*big.Int, participants ParticipantList, threshold int) ([]int, []int, error) { // useful bit of info n := len(participants) chunkSize := threshold + 1 // if we can't meet threshold + 1 we can't do much if n < chunkSize { return []int{}, []int{}, fmt.Errorf("not enough signers (%v) to meet threshold + 1 (%v)", n, chunkSize) } // len(publishedPublicKeys) must equal len(publishedSignatures) must equal len(participants) if n != len(publishedPublicKeys) || n != len(publishedSignatures) { return []int{}, []int{}, fmt.Errorf( "mismatched public keys (%v), signatures (%v) and participants (%v)", len(publishedPublicKeys), len(publishedSignatures), n) } // Now we chunk the arrays and verify each chunk separately knownGood := make([]bool, n) for begin := 0; begin < n; begin += chunkSize { end := begin + chunkSize if end > n { begin -= (end - n) end = n } logger.Infof("Verifying %v <= index < %v", begin, end) groupPublicKeys := publishedPublicKeys[begin:end] groupSignatures := publishedSignatures[begin:end] groupParticipants := participants[begin:end] good, err := VerifyGroupSigners(initialMessage, masterPublicKey, groupPublicKeys, groupSignatures, groupParticipants, threshold) if err != nil { return []int{}, []int{}, fmt.Errorf("failed verifying group signers between %v and %v: %v", begin, end, err) } // if the chunk verified then we mark each element as good if good { for idx := begin; idx < end; idx++ { knownGood[idx] = true // TODO this should be the participant index not idx } } logger.Infof("VerifyGroupSigners([%v:%v]): %v -> %v", begin, end, knownGood, good) } // Hopefully everything is good allGood := all(knownGood) logger.Infof("VerifyGroupSigners(...): %v", allGood) indices := make([]int, n) for idx, participant := range participants { indices[idx] = participant.Index } if allGood { return indices, []int{}, nil } return []int{}, indices, nil } // ------------------------------------ func all(m []bool) bool { for _, v := range m { if !v { return false } } return true }
Swap
identifier_name
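The threshold arithmetic in ThresholdForUserCount and the Fermat-inverse trick in InverseArrayForUserCount from this sample are both easy to check in isolation. Below is a hedged sketch: the small prime 7919 stands in for cloudflare.Order, which is a far larger BN254 group order in practice.

package main

import (
	"fmt"
	"math/big"
)

// threshold mirrors ThresholdForUserCount: with n = 3k + r,
// the threshold is 2k, bumped by one when r == 2.
func threshold(n int) (int, int) {
	k := n / 3
	t := 2 * k
	if n-3*k == 2 {
		t++
	}
	return t, k
}

func main() {
	for _, n := range []int{4, 5, 6, 7, 10} {
		t, k := threshold(n)
		fmt.Printf("n=%d -> threshold=%d k=%d\n", n, t, k)
	}

	// InverseArrayForUserCount computes m^-1 mod Order via Fermat's
	// little theorem (m^(q-2) mod q for prime q). Same idea with a
	// small prime standing in for cloudflare.Order:
	q := big.NewInt(7919) // hypothetical prime modulus, far smaller than the real order
	m := big.NewInt(42)
	mInv := new(big.Int).Exp(m, new(big.Int).Sub(q, big.NewInt(2)), q)
	check := new(big.Int).Mul(m, mInv)
	check.Mod(check, q)
	fmt.Println(check) // 1, confirming mInv is the modular inverse
}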
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize
pub fn art(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", "tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth 
torso", "megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos", "white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos", "black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", 
"rodney", "soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", "canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
{ let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } }
identifier_body
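The id.rs lookups in the sample above all share one shape: lowercase the query, linearly scan a static table, return the position, and panic on a miss. A rough Go rendering of the same pattern, kept in Go to match the earlier examples; the table here is abbreviated to three entries and the helper name is ours.

package main

import (
	"fmt"
	"strings"
)

var fossils = []string{"acanthostega", "amber", "ammonite"} // abbreviated

// fossilID mirrors the id.rs pattern: case-insensitive linear scan,
// index on a hit, panic on a miss so bad names fail loudly.
func fossilID(name string) int {
	name = strings.ToLower(name)
	for i, other := range fossils {
		if other == name {
			return i
		}
	}
	panic(fmt.Sprintf("fossil %q has no id yet", name))
}

func main() {
	fmt.Println(fossilID("Amber")) // 1
}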
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } } pub fn
(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", "tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth torso", 
"megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos", "white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos", "black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", "rodney", 
"soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", "canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
art
identifier_name
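Since that scan is O(n) per call, a table the size of VILLAGERS (hundreds of entries) could justify inverting it once into a map. This is a hedged alternative sketch, not something the source does; the names and the abbreviated table are illustrative.

package main

import (
	"fmt"
	"strings"
)

var villagers = []string{"amelia", "pierce", "apollo"} // abbreviated

// villagerIDs inverts the slice once at init time so each later
// lookup is O(1) instead of a linear scan per call.
var villagerIDs = func() map[string]int {
	m := make(map[string]int, len(villagers))
	for i, name := range villagers {
		m[name] = i
	}
	return m
}()

func villagerID(name string) int {
	id, ok := villagerIDs[strings.ToLower(name)]
	if !ok {
		panic(fmt.Sprintf("villager %q has no id yet", name))
	}
	return id
}

func main() {
	fmt.Println(villagerID("Apollo")) // 2
}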
id.rs
pub fn bug(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match BUGS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("bug '{}' has no id yet", name), } } pub fn fish(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FISH.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fish '{}' has no id yet", name), } } pub fn fossil(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FOSSILS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("fossil '{}' has no id yet", name), } } pub fn flower(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match FLOWERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("flower '{}' has no id yet", name), } } pub fn art(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match ART.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("art '{}' has no id yet", name), } } pub fn villager(name: impl AsRef<str>) -> usize { let name = name.as_ref().to_lowercase(); match VILLAGERS.iter().position(|&other| other == name) { Some(index) => index, _ => panic!("villager '{}' has no id yet", name), } } const BUGS: &[&str] = &[ "common butterfly", "yellow butterfly", "tiger butterfly", "peacock butterfly", "common bluebottle", "paper kite butterfly", "great purple emperor", "monarch butterfly", "emperor butterfly", "agrias butterfly", "rajah brooke's birdwing", "queen alexandra's birdwing", "moth", "atlas moth", "madagascan sunset moth", "long locust", "migratory locust", "rice grasshopper", "grasshopper", "cricket", "bell cricket", "mantis", "orchid mantis", "honeybee", "wasp", "brown cicada", "robust cicada", "giant cicada", "walker cicada", "evening cicada", "cicada shell", "red dragonfly", "darner dragonfly", "banded dragonfly", "damselfly", "firefly", "mole cricket", "pondskater", "diving beetle", "giant water bug", "stinkbug", "man-faced stink bug", "ladybug", "tiger beetle", "jewel beetle", "violin beetle", "citrus long-horned beetle", "rosalia batesi beetle", "blue weevil beetle", "dung beetle", "earth-boring dung beetle", "scarab beetle", "drone beetle", "goliath beetle", "saw stag", "miyama stag", "giant stag", "rainbow stag", "cyclommatus stag", "golden stag", "giraffe stag", "horned dynastid", "horned atlas", "horned elephant", "horned hercules", "walking stick", "walking leaf", "bagworm", "ant", "hermit crab", "wharf roach", "fly", "mosquito", "flea", "snail", "pill bug", "centipede", "spider", "tarantula", "scorpion", ]; const FISH: &[&str] = &[ "bitterling", "pale chub", "crucian carp", "dace", "carp", "koi", "goldfish", "pop-eyed goldfish", "ranchu goldfish", "killifish", "crawfish", "soft-shelled turtle", "snapping turtle", "tadpole", "frog", "freshwater goby", "loach", "catfish", "giant snakehead", "bluegill", "yellow perch", "black bass", "tilapia", "pike", "pond smelt", "sweetfish", "cherry salmon", "char", "golden trout", "stringfish", "salmon", "king salmon", "mitten crab", "guppy", "nibble fish", "angelfish", "betta", "neon tetra", "rainbowfish", "piranha", "arowana", "dorado", "gar", "arapaima", "saddled bichir", "sturgeon", "sea butterfly", "sea horse", "clown fish", "surgeonfish", "butterfly fish", "napoleonfish", "zebra turkeyfish", "blowfish", "puffer fish", "anchovy", "horse mackerel", "barred knifejaw", "sea bass", "red snapper", "dab", "olive flounder", "squid", "moray eel", "ribbon eel", 
"tuna", "blue marlin", "giant trevally", "mahi-mahi", "ocean sunfish", "ray", "saw shark", "hammerhead shark", "great white shark", "whale shark", "suckerfish", "football fish", "oarfish", "barreleye", "coelacanth", ]; const FOSSILS: &[&str] = &[ "acanthostega", "amber", "ammonite", "anomalocaris", "archaeopteryx", "australopith", "coprolite", "dinosaur track", "dunkleosteus", "eusthenopteron", "juramaia", "myllokunmingia", "shark-tooth pattern", "trilobite", "ankylo skull", "ankylo torso", "ankylo tail", "archelon skull", "archelon tail", "brachio skull", "brachio chest", "brachio pelvis", "brachio tail", "deinony torso", "deinony tail", "dimetrodon skull", "dimetrodon torso", "diplo skull", "diplo neck", "diplo chest", "diplo pelvis", "diplo tail", "diplo tail tip", "iguanodon skull", "iguanodon torso", "iguanodon tail", "mammoth skull", "mammoth torso", "megacero skull", "megacero torso", "megacero tail", "left megalo side", "right megalo side", "ophthalmo skull", "ophthalmo torso", "pachysaurus skull", "pachysaurus tail", "parasaur skull", "parasaur torso", "parasaur tail", "plesio skull", "plesio tail", "plesio body", "right ptera wing", "ptera body", "left ptera wing", "right quetzal wing", "quetzal torso", "left quetzal wing", "sabertooth skull", "sabertooth tail", "spino skull", "spino torso", "spino tail", "stego skull", "stego torso", "stego tail", "tricera skull", "tricera torso", "tricera tail", "t. rex skull", "t. rex torso", "t. rex tail", ]; const FLOWERS: &[&str] = &[ "red cosmos",
"black cosmos", "white tulips", "red tulips", "yellow tulips", "pink tulips", "orange tulips", "purple tulips", "black tulips", "yellow pansies", "red pansies", "white pansies", "orange pansies", "purple pansies", "blue pansies", "white roses", "red roses", "yellow roses", "pink roses", "orange roses", "purple roses", "black roses", "blue roses", "gold roses", "white lilies", "red lilies", "yellow lilies", "pink lilies", "orange lilies", "black lilies", "white windflowers", "orange windflowers", "red windflowers", "blue windflowers", "pink windflowers", "purple windflowers", "white hyacinths", "yellow hyacinths", "red hyacinths", "pink hyacinths", "orange hyacinths", "blue hyacinths", "purple hyacinths", "white mums", "yellow mums", "red mums", "purple mums", "pink mums", "green mums", ]; const ART: &[&str] = &[ "robust statue", "rock-head statue", "beautiful statue", "valiant statue", "gallant statue", "mystic statue", "informative statue", "warrior statue", "tremendous statue", "ancient statue", "motherly statue", "familiar statue", "great statue", "quaint painting", "graceful painting", "famous painting", "detailed painting", "basic painting", "serene painting", "amazing painting", "solemn painting", "scary painting", "jolly painting", "wistful painting", "moving painting", "wild painting left half", "wild painting right half", "scenic painting", "academic painting", "common painting", "flowery painting", "twinkling painting", "nice painting", "moody painting", "glowing painting", "perfect painting", "mysterious painting", "calm painting", "proper painting", "sinking painting", "worthy painting", "warm painting", "dynamic painting", ]; const VILLAGERS: &[&str] = &[ "amelia", "pierce", "apollo", "frank", "buzz", "sterling", "keaton", "celia", "avery", "deli", "tammi", "monty", "nana", "flip", "simon", "elise", "shari", "anabelle", "annalisa", "snooty", "pango", "olaf", "antonio", "cyrano", "beardo", "chow", "megan", "groucho", "grizzly", "klaus", "ike", "curt", "tutu", "nate", "paula", "pinky", "charlise", "teddy", "ursala", "filbert", "sally", "cally", "marshal", "agent s", "blaire", "nibbles", "sylvana", "mint", "hazel", "tasha", "pecan", "peanut", "caroline", "ricky", "static", "sheldon", "poppy", "axel", "dizzy", "big top", "tia", "ellie", "margie", "eloise", "opal", "paolo", "cyd", "tucker", "bill", "pate", "pompom", "derwin", "drake", "gloria", "scoot", "joey", "mallary", "molly", "deena", "ketchup", "freckles", "quillson", "maelle", "miranda", "weber", "croque", "drift", "diva", "henry", "frobert", "cousteau", "jambette", "jeremiah", "huck", "lily", "puddles", "tad", "prince", "ribbot", "raddle", "camofrog", "gigi", "wart jr.", "cesar", "boyd", "hans", "rocket", "al", "boone", "violet", "louie", "peewee", "flurry", "clay", "graham", "hamlet", "hamphrey", "apple", "rodney", "soleil", "genji", "chrissy", "claude", "dotty", "gabi", "gaston", "doc", "bonbon", "carmen", "cole", "coco", "pippy", "francine", "tiffany", "bunnie", "mira", "o'hare", "snake", "hopkins", "ruby", "benedict", "broffina", "ava", "ken", "becky", "goose", "plucky", "knox", "egbert", "maddie", "bea", "cherry", "goldie", "daisy", "walker", "butch", "portia", "biskit", "marcel", "cookie", "bones", "shep", "lucky", "benjamin", "mac", "astrid", "carrie", "kitt", "mathilda", "marcie", "rooney", "walt", "sylvia", "olivia", "lolly", "purrl", "raymond", "rudy", "katt", "bob", "punchy", "kabuki", "kitty", "kiki", "ankha", "mitzi", "felicity", "merry", "monique", "kid cat", "rosie", "stinky", "tangy", "tom", "moe", "tabby", 
"canberra", "gonzo", "alice", "melba", "ozzie", "lyman", "sydney", "eugene", "yuka", "alli", "drago", "del", "alfonso", "gayle", "sly", "boots", "tipper", "angus", "coach", "stu", "naomi", "vic", "t-bone", "norma", "patty", "rodeo", "bud", "leopold", "rory", "elvis", "mott", "lionel", "rex", "chadder", "dora", "anicotti", "broccolo", "greta", "bree", "rod", "moose", "penelope", "limberg", "candi", "rizzo", "samson", "bettina", "bella", "spike", "tank", "renée", "merengue", "rhonda", "hornsby", "bertha", "bitty", "bubbles", "hippeux", "harry", "biff", "rocco", "marina", "octavian", "zucker", "reneigh", "annalise", "cleo", "colton", "peaches", "elmer", "victoria", "papi", "ed", "julian", "roscoe", "buck", "clyde", "winnie", "savannah", "cube", "friga", "hopper", "gwen", "tex", "boomer", "iggly", "puck", "roald", "aurora", "wade", "flo", "sprinkle", "bam", "deirdre", "lopez", "erik", "fauna", "beau", "bruce", "fuchsia", "diana", "zell", "baabara", "vesta", "dom", "eunice", "cashmere", "curlos", "muffy", "frita", "willow", "pietro", "stella", "timbra", "wendy", "boris", "pancetti", "chops", "rasher", "hugh", "kevin", "lucy", "truffles", "maggie", "agnes", "curly", "gala", "peggy", "cobb", "spork", "blanche", "flora", "cranston", "phil", "queenie", "julia", "sprocket", "phoebe", "gladys", "sandy", "kody", "stitches", "pudge", "bluebear", "cheri", "chester", "barold", "june", "olive", "murphy", "judy", "maple", "pekoe", "tammy", "poncho", "vladimir", "tybalt", "bianca", "rolf", "rowan", "claudia", "bangle", "leonardo", "jitters", "midge", "admiral", "anchovy", "piper", "jay", "jacob", "robin", "lucha", "sparro", "peck", "jacques", "twiggy", "freya", "fang", "audie", "whitney", "lobo", "skye", "chief", "dobie", "vivian", "wolfgang", "kyle", "chevre", "kidd", "gruff", "billy", "sherb", "pashmina", "velma", "nan", ];
"white cosmos", "yellow cosmos", "pink cosmos", "orange cosmos",
random_line_split
main.rs
use std::f64::consts::PI; use clap::*; use gre::*; use noise::*; use rand::Rng; use svg::node::element::path::Data; use svg::node::element::*; #[derive(Parser)] #[clap()] pub struct Opts { #[clap(short, long, default_value = "image.svg")] file: String, #[clap(short, long, default_value = "100.0")] pub width: f64, #[clap(short, long, default_value = "150.0")] pub height: f64, #[clap(short, long, default_value = "5.0")] pub pad: f64, #[clap(short, long, default_value = "0.0")] pub seed: f64, #[clap(short, long, default_value = "0.0")] pub seed1: f64, #[clap(short, long, default_value = "0.0")] pub seed2: f64, #[clap(short, long, default_value = "0.0")] pub seed3: f64, } fn shake<R: Rng>( path: Vec<(f64, f64)>, scale: f64, rng: &mut R, ) -> Vec<(f64, f64)> { path .iter() .map(|&(x, y)| { let dx = rng.gen_range(-scale, scale); let dy = rng.gen_range(-scale, scale); (x + dx, y + dy) }) .collect() } fn eagle<R: Rng>( origin: (f64, f64), scale: f64, rotation: f64, xreverse: bool, rng: &mut R, ) -> Vec<Vec<(f64, f64)>> { let xmul = if xreverse { -1.0 } else { 1.0 }; let count = 2 + (scale * 3.0) as usize; let mut routes: Vec<Vec<(f64, f64)>> = Vec::new(); let shaking = scale * 0.1; // body let bodyw = 5.0; let bodyh = 1.5; let headcompression = rng.gen_range(0.1, 0.5); let headoff = rng.gen_range(0.1, 0.5); for i in 0..count { let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let ybase = bodyh * yp; let route = shake( path_subdivide_to_curve( vec![ (-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase), (-0.3 * bodyw, ybase), (0.2 * bodyw, ybase), (0.45 * bodyw, headcompression * ybase + headoff * bodyh), ], 1, 0.8, ), shaking, rng, ); routes.push(route); } let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize; // wings let wingw = 1.4; let wingh = 8.0; let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0); let dx2 = if rng.gen_bool(0.8) { -dx1 } else { rng.gen_range(-3.0, 3.0) }; let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let interp = 0.5; let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2up = rng.gen_bool(0.5); for i in 0..count { let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let xbase = wingw * xp; let wing1 = rng.gen_range(0.8, 1.1) * wing1m; let wing2 = rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 }); let route = shake( path_subdivide_to_curve( vec![ ( xbase * spread1 + dx1 + wingw * offset1, -wingh * 0.5 * wing1, ), (xbase + dx1 * interp, -wingh * 0.5 * interp * wing1), (xbase, 0.0), (xbase + dx2 * interp, wingh * 0.5 * interp * wing2), (xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2), ], 2, 0.8, ), shaking, rng, ); routes.push(route); } // scale, rotate & translate routes .iter() .map(|route| { route .iter() .map(|&p| { let p = p_r(p, rotation); (xmul * scale * p.0 + origin.0, scale * p.1 + origin.1) }) .collect() }) .collect() } fn art(opts: &Opts) -> Vec<Group> { let height = opts.height; let width = opts.width; let pad = opts.pad; let mut rng = rng_from_seed(opts.seed); let perlin = Perlin::new(); let mut passage = Passage::new(0.5, width, height); let passage_threshold = 5; let min_route = 2; let peakfactor = rng.gen_range(-0.001, 0.001) * rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); 
let ynoisefactor = rng.gen_range(0.02, 0.1); let yincr = rng.gen_range(1.5, 3.0); let amp2 = rng.gen_range(1.0, 12.0); let precision = rng.gen_range(0.1, 0.3); let offsetstrategy = rng.gen_range(0, 5); let mut routes = Vec::new(); let mut cave_spawned = false; let cave_threshold = rng.gen_range(0.5, 0.9) * height; let mut cave_initial_pos = Vec::new(); let w = rng.gen_range(0.05, 0.2); let mut base_y = height * 5.0; let mut miny = height; let stopy = rng.gen_range(0.2, 0.5) * height; let mut height_map: Vec<f64> = Vec::new(); loop { if miny < stopy { break; } if miny < cave_threshold && !cave_spawned { cave_spawned = true; let xfrom = (0.5 - w / 2.0) * width; let xto = (0.5 + w / 2.0) * width; let yamp = rng.gen_range(8.0, 24.0); let mut x = xfrom; let mut route = Vec::new(); loop { if x > xto { break; } let xi = (x / precision) as usize; let ybottom = height_map[xi].min(height - pad); let ytop = ybottom - yamp * (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs() + 0.6 * (1.0 - (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0))); height_map[xi] = ytop; cave_initial_pos.push((x, ytop)); if (route.len() / 2) % 2 == 0 { route.push((x, ybottom)); route.push((x, ytop)); } else { route.push((x, ytop)); route.push((x, ybottom)); } x += precision; } routes.push(route); } let mut route = Vec::new(); let mut x = pad; let mut was_outside = true; loop { if x > width - pad { break; } let xv = (4.0 - base_y / height) * (x - width / 2.); let amp = height * 0.05; let mut y = base_y; if offsetstrategy == 0 { y += amp * peakfactor * xv * xv; } y += -amp * perlin .get([ // xv * 0.005, y * 0.02, 77. + opts.seed / 7.3 + perlin.get([ // -opts.seed * 7.3, 8.3 + xv * 0.02, y * 0.1, ]), ]) .abs(); if offsetstrategy == 1 { y += amp * peakfactor * xv * xv; } y += amp2 * amp * perlin.get([ // 8.3 + xv * 0.01, 88.1 + y * ynoisefactor, opts.seed * 97.3, ]); if offsetstrategy == 2 { y += amp * peakfactor * xv * xv; } y += amp * perlin.get([ // opts.seed * 9.3 - 77., xv * 0.1, y * 0.5, ]) * perlin .get([ // xv * 0.02, 88.1 + y * 0.2, -opts.seed / 7.7, ]) .min(0.0); if offsetstrategy == 3 { y += amp * peakfactor * xv * xv; } y += 0.1 * amp * (1.0 - miny / height) * perlin.get([ // 66666. + opts.seed * 1.3, 88.3 + xv * 0.5, 88.1 + y * 0.5, ]); if offsetstrategy == 4 { y += amp * peakfactor * xv * xv; } if y < miny { miny = y; } let mut collides = false; let xi = (x / precision) as usize; if xi >= height_map.len() { height_map.push(y); } else { if y > height_map[xi] { collides = true; } else { height_map[xi] = y; } } let inside = !collides && pad < x && x < width - pad && pad < y && y < height - pad; if inside && passage.get((x, y)) < passage_threshold { if was_outside { if route.len() > min_route { routes.push(route); } route = Vec::new(); } was_outside = false; route.push((x, y)); passage.count((x, y)); } else { was_outside = true; } x += precision; } if route.len() > min_route { routes.push(route); } base_y -= yincr; } let radius = 6.0; passage.grow_passage(radius); rng.shuffle(&mut cave_initial_pos); let mut positions = Vec::new(); for i in 0..rng.gen_range(4, 12) { if i >= cave_initial_pos.len() - 1 { break; } let initial = cave_initial_pos[i]; let mut a = -PI / 2. + rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0); let mut p = initial; let amp = 3.0; let pad = pad * 2.; loop {
a += rng.gen_range(-0.5, 0.5) * rng.gen_range(0.0, 1.0); } } rng.shuffle(&mut positions); let disp = rng.gen_range(0.5, 3.0); let mut eagles = Vec::new(); for p in positions { if rng.gen_bool(0.2) { continue; } let scale = rng.gen_range(0.3, 0.5); let p = ( p.0 + disp * rng.gen_range(-1.0, 1.0), p.1 + disp * rng.gen_range(-1.0, 1.0), ); if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { continue; } eagles.push(eagle( p, scale, rng.gen_range(-1.0, 1.0), rng.gen_bool(0.5), &mut rng, )); } routes = vec![routes, eagles.concat()].concat(); for i in 0..10 { let d = i as f64 * 0.25; routes.push(vec![ (pad + d, pad + d), (pad + d, height - pad - d), (width - pad - d, height - pad - d), (width - pad - d, pad + d), (pad + d, pad + d), ]); } let color = "black"; let mut data = Data::new(); for route in routes.clone() { data = render_route(data, route); } let mut l = layer(color); l = l.add(base_path(color, 0.35, data)); vec![l] } fn main() { let opts: Opts = Opts::parse(); let groups = art(&opts); let mut document = base_document("white", opts.width, opts.height); for g in groups { document = document.add(g); } svg::save(opts.file, &document).unwrap(); } #[derive(Clone)] struct Passage { precision: f64, width: f64, height: f64, counters: Vec<usize>, } impl Passage { pub fn new(precision: f64, width: f64, height: f64) -> Self { let wi = (width / precision).ceil() as usize; let hi = (height / precision).ceil() as usize; let counters = vec![0; wi * hi]; Passage { precision, width, height, counters, } } fn index(self: &Self, (x, y): (f64, f64)) -> usize { let wi = (self.width / self.precision).ceil() as usize; let hi = (self.height / self.precision).ceil() as usize; let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1); let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1); yi * wi + xi } pub fn count(self: &mut Self, p: (f64, f64)) -> usize { let i = self.index(p); let v = self.counters[i] + 1; self.counters[i] = v; v } pub fn count_once(self: &mut Self, p: (f64, f64)) { let i = self.index(p); let v = self.counters[i]; if v == 0 { self.counters[i] = 1; } } pub fn get(self: &Self, p: (f64, f64)) -> usize { let i = self.index(p); self.counters[i] } pub fn grow_passage(self: &mut Self, radius: f64) { let precision = self.precision; let width = self.width; let height = self.height; let counters: Vec<usize> = self.counters.iter().cloned().collect(); let mut mask = Vec::new(); // TODO, in future for even better perf, I will rewrite this // working directly with index integers instead of having to use index() / count_once() let mut x = -radius; loop { if x >= radius { break; } let mut y = -radius; loop { if y >= radius { break; } if x * x + y * y < radius * radius { mask.push((x, y)); } y += precision; } x += precision; } let mut x = 0.0; loop { if x >= width { break; } let mut y = 0.0; loop { if y >= height { break; } let index = self.index((x, y)); if counters[index] > 0 { for &(dx, dy) in mask.iter() { self.count_once((x + dx, y + dy)); } } y += precision; } x += precision; } } } fn lerp_point(a: (f64, f64), b: (f64, f64), m: f64) -> (f64, f64) { (a.0 * (1. - m) + b.0 * m, a.1 * (1. 
- m) + b.1 * m) } fn path_subdivide_to_curve_it( path: Vec<(f64, f64)>, interpolation: f64, ) -> Vec<(f64, f64)> { let l = path.len(); if l < 3 { return path; } let mut route = Vec::new(); let mut first = path[0]; let mut last = path[l - 1]; let looped = euclidian_dist(first, last) < 0.1; if looped { first = lerp_point(path[1], first, interpolation); } route.push(first); for i in 1..(l - 1) { let p = path[i]; let p1 = lerp_point(path[i - 1], p, interpolation); let p2 = lerp_point(path[i + 1], p, interpolation); route.push(p1); route.push(p2); } if looped { last = lerp_point(path[l - 2], last, interpolation); } route.push(last); if looped { route.push(first); } route } fn path_subdivide_to_curve( path: Vec<(f64, f64)>, n: usize, interpolation: f64, ) -> Vec<(f64, f64)> { let mut route = path; for _i in 0..n { route = path_subdivide_to_curve_it(route, interpolation); } route }
if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { break; } p = (p.0 + amp * a.cos(), p.1 + amp * a.sin()); positions.push(p);
random_line_split
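The middle snippet in the record above is a drifting random walk: a point marches a fixed step `amp` along a heading `a` that wanders a little each iteration, and the walk stops once it leaves the padded frame. A minimal, dependency-free sketch of the same idea follows; the tiny `Lcg` PRNG and the start point are stand-in assumptions, since the original uses the `rand` crate and a cave-mouth position.

```rust
// Dependency-free sketch of the drifting random walk; Lcg is a stand-in
// assumption for the `rand` crate used by the original record.
struct Lcg(u64);

impl Lcg {
    // next pseudo-random f64 in [0, 1)
    fn next_f64(&mut self) -> f64 {
        self.0 = self
            .0
            .wrapping_mul(6364136223846793005)
            .wrapping_add(1442695040888963407);
        (self.0 >> 11) as f64 / (1u64 << 53) as f64
    }
    // uniform value in [lo, hi)
    fn range(&mut self, lo: f64, hi: f64) -> f64 {
        lo + (hi - lo) * self.next_f64()
    }
}

fn main() {
    let (width, height, pad) = (100.0, 150.0, 10.0);
    let mut rng = Lcg(42);
    let mut a = -std::f64::consts::PI / 2.0 + rng.range(-1.0, 1.0); // initial heading
    let mut p = (50.0, 120.0); // hypothetical start; the record uses a cave-mouth position
    let amp = 3.0; // fixed step length, as in the record
    let mut positions = Vec::new();
    // march until the walk leaves the padded frame
    while p.0 > pad && p.0 < width - pad && p.1 > pad && p.1 < height - pad {
        p = (p.0 + amp * a.cos(), p.1 + amp * a.sin());
        positions.push(p);
        a += rng.range(-0.5, 0.5); // the heading drifts a little each step
    }
    println!("walk visited {} points", positions.len());
}
```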
main.rs
use std::f64::consts::PI; use clap::*; use gre::*; use noise::*; use rand::Rng; use svg::node::element::path::Data; use svg::node::element::*; #[derive(Parser)] #[clap()] pub struct Opts { #[clap(short, long, default_value = "image.svg")] file: String, #[clap(short, long, default_value = "100.0")] pub width: f64, #[clap(short, long, default_value = "150.0")] pub height: f64, #[clap(short, long, default_value = "5.0")] pub pad: f64, #[clap(short, long, default_value = "0.0")] pub seed: f64, #[clap(short, long, default_value = "0.0")] pub seed1: f64, #[clap(short, long, default_value = "0.0")] pub seed2: f64, #[clap(short, long, default_value = "0.0")] pub seed3: f64, } fn shake<R: Rng>( path: Vec<(f64, f64)>, scale: f64, rng: &mut R, ) -> Vec<(f64, f64)>
fn eagle<R: Rng>( origin: (f64, f64), scale: f64, rotation: f64, xreverse: bool, rng: &mut R, ) -> Vec<Vec<(f64, f64)>> { let xmul = if xreverse { -1.0 } else { 1.0 }; let count = 2 + (scale * 3.0) as usize; let mut routes: Vec<Vec<(f64, f64)>> = Vec::new(); let shaking = scale * 0.1; // body let bodyw = 5.0; let bodyh = 1.5; let headcompression = rng.gen_range(0.1, 0.5); let headoff = rng.gen_range(0.1, 0.5); for i in 0..count { let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let ybase = bodyh * yp; let route = shake( path_subdivide_to_curve( vec![ (-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase), (-0.3 * bodyw, ybase), (0.2 * bodyw, ybase), (0.45 * bodyw, headcompression * ybase + headoff * bodyh), ], 1, 0.8, ), shaking, rng, ); routes.push(route); } let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize; // wings let wingw = 1.4; let wingh = 8.0; let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0); let dx2 = if rng.gen_bool(0.8) { -dx1 } else { rng.gen_range(-3.0, 3.0) }; let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let interp = 0.5; let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2up = rng.gen_bool(0.5); for i in 0..count { let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let xbase = wingw * xp; let wing1 = rng.gen_range(0.8, 1.1) * wing1m; let wing2 = rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 }); let route = shake( path_subdivide_to_curve( vec![ ( xbase * spread1 + dx1 + wingw * offset1, -wingh * 0.5 * wing1, ), (xbase + dx1 * interp, -wingh * 0.5 * interp * wing1), (xbase, 0.0), (xbase + dx2 * interp, wingh * 0.5 * interp * wing2), (xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2), ], 2, 0.8, ), shaking, rng, ); routes.push(route); } // scale, rotate & translate routes .iter() .map(|route| { route .iter() .map(|&p| { let p = p_r(p, rotation); (xmul * scale * p.0 + origin.0, scale * p.1 + origin.1) }) .collect() }) .collect() } fn art(opts: &Opts) -> Vec<Group> { let height = opts.height; let width = opts.width; let pad = opts.pad; let mut rng = rng_from_seed(opts.seed); let perlin = Perlin::new(); let mut passage = Passage::new(0.5, width, height); let passage_threshold = 5; let min_route = 2; let peakfactor = rng.gen_range(-0.001, 0.001) * rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let ynoisefactor = rng.gen_range(0.02, 0.1); let yincr = rng.gen_range(1.5, 3.0); let amp2 = rng.gen_range(1.0, 12.0); let precision = rng.gen_range(0.1, 0.3); let offsetstrategy = rng.gen_range(0, 5); let mut routes = Vec::new(); let mut cave_spawned = false; let cave_threshold = rng.gen_range(0.5, 0.9) * height; let mut cave_initial_pos = Vec::new(); let w = rng.gen_range(0.05, 0.2); let mut base_y = height * 5.0; let mut miny = height; let stopy = rng.gen_range(0.2, 0.5) * height; let mut height_map: Vec<f64> = Vec::new(); loop { if miny < stopy { break; } if miny < cave_threshold && !cave_spawned { cave_spawned = true; let xfrom = (0.5 - w / 2.0) * width; let xto = (0.5 + w / 2.0) * width; let yamp = rng.gen_range(8.0, 24.0); let mut x = xfrom; let mut route = Vec::new(); loop { if x > xto { break; } let xi = (x / precision) as usize; let ybottom = height_map[xi].min(height - pad); let 
ytop = ybottom - yamp * (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs() + 0.6 * (1.0 - (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0))); height_map[xi] = ytop; cave_initial_pos.push((x, ytop)); if (route.len() / 2) % 2 == 0 { route.push((x, ybottom)); route.push((x, ytop)); } else { route.push((x, ytop)); route.push((x, ybottom)); } x += precision; } routes.push(route); } let mut route = Vec::new(); let mut x = pad; let mut was_outside = true; loop { if x > width - pad { break; } let xv = (4.0 - base_y / height) * (x - width / 2.); let amp = height * 0.05; let mut y = base_y; if offsetstrategy == 0 { y += amp * peakfactor * xv * xv; } y += -amp * perlin .get([ // xv * 0.005, y * 0.02, 77. + opts.seed / 7.3 + perlin.get([ // -opts.seed * 7.3, 8.3 + xv * 0.02, y * 0.1, ]), ]) .abs(); if offsetstrategy == 1 { y += amp * peakfactor * xv * xv; } y += amp2 * amp * perlin.get([ // 8.3 + xv * 0.01, 88.1 + y * ynoisefactor, opts.seed * 97.3, ]); if offsetstrategy == 2 { y += amp * peakfactor * xv * xv; } y += amp * perlin.get([ // opts.seed * 9.3 - 77., xv * 0.1, y * 0.5, ]) * perlin .get([ // xv * 0.02, 88.1 + y * 0.2, -opts.seed / 7.7, ]) .min(0.0); if offsetstrategy == 3 { y += amp * peakfactor * xv * xv; } y += 0.1 * amp * (1.0 - miny / height) * perlin.get([ // 66666. + opts.seed * 1.3, 88.3 + xv * 0.5, 88.1 + y * 0.5, ]); if offsetstrategy == 4 { y += amp * peakfactor * xv * xv; } if y < miny { miny = y; } let mut collides = false; let xi = (x / precision) as usize; if xi >= height_map.len() { height_map.push(y); } else { if y > height_map[xi] { collides = true; } else { height_map[xi] = y; } } let inside = !collides && pad < x && x < width - pad && pad < y && y < height - pad; if inside && passage.get((x, y)) < passage_threshold { if was_outside { if route.len() > min_route { routes.push(route); } route = Vec::new(); } was_outside = false; route.push((x, y)); passage.count((x, y)); } else { was_outside = true; } x += precision; } if route.len() > min_route { routes.push(route); } base_y -= yincr; } let radius = 6.0; passage.grow_passage(radius); rng.shuffle(&mut cave_initial_pos); let mut positions = Vec::new(); for i in 0..rng.gen_range(4, 12) { if i >= cave_initial_pos.len() - 1 { break; } let initial = cave_initial_pos[i]; let mut a = -PI / 2. 
+ rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0); let mut p = initial; let amp = 3.0; let pad = pad * 2.; loop { if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { break; } p = (p.0 + amp * a.cos(), p.1 + amp * a.sin()); positions.push(p); a += rng.gen_range(-0.5, 0.5) * rng.gen_range(0.0, 1.0); } } rng.shuffle(&mut positions); let disp = rng.gen_range(0.5, 3.0); let mut eagles = Vec::new(); for p in positions { if rng.gen_bool(0.2) { continue; } let scale = rng.gen_range(0.3, 0.5); let p = ( p.0 + disp * rng.gen_range(-1.0, 1.0), p.1 + disp * rng.gen_range(-1.0, 1.0), ); if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { continue; } eagles.push(eagle( p, scale, rng.gen_range(-1.0, 1.0), rng.gen_bool(0.5), &mut rng, )); } routes = vec![routes, eagles.concat()].concat(); for i in 0..10 { let d = i as f64 * 0.25; routes.push(vec![ (pad + d, pad + d), (pad + d, height - pad - d), (width - pad - d, height - pad - d), (width - pad - d, pad + d), (pad + d, pad + d), ]); } let color = "black"; let mut data = Data::new(); for route in routes.clone() { data = render_route(data, route); } let mut l = layer(color); l = l.add(base_path(color, 0.35, data)); vec![l] } fn main() { let opts: Opts = Opts::parse(); let groups = art(&opts); let mut document = base_document("white", opts.width, opts.height); for g in groups { document = document.add(g); } svg::save(opts.file, &document).unwrap(); } #[derive(Clone)] struct Passage { precision: f64, width: f64, height: f64, counters: Vec<usize>, } impl Passage { pub fn new(precision: f64, width: f64, height: f64) -> Self { let wi = (width / precision).ceil() as usize; let hi = (height / precision).ceil() as usize; let counters = vec![0; wi * hi]; Passage { precision, width, height, counters, } } fn index(self: &Self, (x, y): (f64, f64)) -> usize { let wi = (self.width / self.precision).ceil() as usize; let hi = (self.height / self.precision).ceil() as usize; let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1); let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1); yi * wi + xi } pub fn count(self: &mut Self, p: (f64, f64)) -> usize { let i = self.index(p); let v = self.counters[i] + 1; self.counters[i] = v; v } pub fn count_once(self: &mut Self, p: (f64, f64)) { let i = self.index(p); let v = self.counters[i]; if v == 0 { self.counters[i] = 1; } } pub fn get(self: &Self, p: (f64, f64)) -> usize { let i = self.index(p); self.counters[i] } pub fn grow_passage(self: &mut Self, radius: f64) { let precision = self.precision; let width = self.width; let height = self.height; let counters: Vec<usize> = self.counters.iter().cloned().collect(); let mut mask = Vec::new(); // TODO, in future for even better perf, I will rewrite this // working directly with index integers instead of having to use index() / count_once() let mut x = -radius; loop { if x >= radius { break; } let mut y = -radius; loop { if y >= radius { break; } if x * x + y * y < radius * radius { mask.push((x, y)); } y += precision; } x += precision; } let mut x = 0.0; loop { if x >= width { break; } let mut y = 0.0; loop { if y >= height { break; } let index = self.index((x, y)); if counters[index] > 0 { for &(dx, dy) in mask.iter() { self.count_once((x + dx, y + dy)); } } y += precision; } x += precision; } } } fn lerp_point(a: (f64, f64), b: (f64, f64), m: f64) -> (f64, f64) { (a.0 * (1. - m) + b.0 * m, a.1 * (1. 
- m) + b.1 * m) } fn path_subdivide_to_curve_it( path: Vec<(f64, f64)>, interpolation: f64, ) -> Vec<(f64, f64)> { let l = path.len(); if l < 3 { return path; } let mut route = Vec::new(); let mut first = path[0]; let mut last = path[l - 1]; let looped = euclidian_dist(first, last) < 0.1; if looped { first = lerp_point(path[1], first, interpolation); } route.push(first); for i in 1..(l - 1) { let p = path[i]; let p1 = lerp_point(path[i - 1], p, interpolation); let p2 = lerp_point(path[i + 1], p, interpolation); route.push(p1); route.push(p2); } if looped { last = lerp_point(path[l - 2], last, interpolation); } route.push(last); if looped { route.push(first); } route } fn path_subdivide_to_curve( path: Vec<(f64, f64)>, n: usize, interpolation: f64, ) -> Vec<(f64, f64)> { let mut route = path; for _i in 0..n { route = path_subdivide_to_curve_it(route, interpolation); } route }
{ path .iter() .map(|&(x, y)| { let dx = rng.gen_range(-scale, scale); let dy = rng.gen_range(-scale, scale); (x + dx, y + dy) }) .collect() }
identifier_body
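The `identifier_body` middle above is the body of `shake`: every vertex of a polyline is displaced independently by up to ±`scale` on both axes, which is what gives the eagle strokes their hand-drawn look. A sketch of the same transform with the RNG abstracted out follows; `shake_with` and the fixed offset table are hypothetical stand-ins for the record's `rng.gen_range(-scale, scale)` calls.

```rust
// Sketch of the shake transform: every vertex is displaced independently.
// `jitter` is a hypothetical stand-in for rng.gen_range(-scale, scale).
fn shake_with<F: FnMut() -> f64>(path: Vec<(f64, f64)>, mut jitter: F) -> Vec<(f64, f64)> {
    path.iter().map(|&(x, y)| (x + jitter(), y + jitter())).collect()
}

fn main() {
    let square = vec![(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)];
    // a fixed offset table stands in for the RNG so the output is reproducible
    let offsets = [0.03, -0.02, 0.01, -0.04, 0.02, -0.01, 0.04, -0.03];
    let mut i = 0;
    let shaken = shake_with(square, || {
        let v = offsets[i % offsets.len()];
        i += 1;
        v
    });
    println!("{:?}", shaken);
}
```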
main.rs
use std::f64::consts::PI; use clap::*; use gre::*; use noise::*; use rand::Rng; use svg::node::element::path::Data; use svg::node::element::*; #[derive(Parser)] #[clap()] pub struct
{ #[clap(short, long, default_value = "image.svg")] file: String, #[clap(short, long, default_value = "100.0")] pub width: f64, #[clap(short, long, default_value = "150.0")] pub height: f64, #[clap(short, long, default_value = "5.0")] pub pad: f64, #[clap(short, long, default_value = "0.0")] pub seed: f64, #[clap(short, long, default_value = "0.0")] pub seed1: f64, #[clap(short, long, default_value = "0.0")] pub seed2: f64, #[clap(short, long, default_value = "0.0")] pub seed3: f64, } fn shake<R: Rng>( path: Vec<(f64, f64)>, scale: f64, rng: &mut R, ) -> Vec<(f64, f64)> { path .iter() .map(|&(x, y)| { let dx = rng.gen_range(-scale, scale); let dy = rng.gen_range(-scale, scale); (x + dx, y + dy) }) .collect() } fn eagle<R: Rng>( origin: (f64, f64), scale: f64, rotation: f64, xreverse: bool, rng: &mut R, ) -> Vec<Vec<(f64, f64)>> { let xmul = if xreverse { -1.0 } else { 1.0 }; let count = 2 + (scale * 3.0) as usize; let mut routes: Vec<Vec<(f64, f64)>> = Vec::new(); let shaking = scale * 0.1; // body let bodyw = 5.0; let bodyh = 1.5; let headcompression = rng.gen_range(0.1, 0.5); let headoff = rng.gen_range(0.1, 0.5); for i in 0..count { let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let ybase = bodyh * yp; let route = shake( path_subdivide_to_curve( vec![ (-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase), (-0.3 * bodyw, ybase), (0.2 * bodyw, ybase), (0.45 * bodyw, headcompression * ybase + headoff * bodyh), ], 1, 0.8, ), shaking, rng, ); routes.push(route); } let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize; // wings let wingw = 1.4; let wingh = 8.0; let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0); let dx2 = if rng.gen_bool(0.8) { -dx1 } else { rng.gen_range(-3.0, 3.0) }; let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let interp = 0.5; let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2up = rng.gen_bool(0.5); for i in 0..count { let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let xbase = wingw * xp; let wing1 = rng.gen_range(0.8, 1.1) * wing1m; let wing2 = rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 }); let route = shake( path_subdivide_to_curve( vec![ ( xbase * spread1 + dx1 + wingw * offset1, -wingh * 0.5 * wing1, ), (xbase + dx1 * interp, -wingh * 0.5 * interp * wing1), (xbase, 0.0), (xbase + dx2 * interp, wingh * 0.5 * interp * wing2), (xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2), ], 2, 0.8, ), shaking, rng, ); routes.push(route); } // scale, rotate & translate routes .iter() .map(|route| { route .iter() .map(|&p| { let p = p_r(p, rotation); (xmul * scale * p.0 + origin.0, scale * p.1 + origin.1) }) .collect() }) .collect() } fn art(opts: &Opts) -> Vec<Group> { let height = opts.height; let width = opts.width; let pad = opts.pad; let mut rng = rng_from_seed(opts.seed); let perlin = Perlin::new(); let mut passage = Passage::new(0.5, width, height); let passage_threshold = 5; let min_route = 2; let peakfactor = rng.gen_range(-0.001, 0.001) * rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let ynoisefactor = rng.gen_range(0.02, 0.1); let yincr = rng.gen_range(1.5, 3.0); let amp2 = rng.gen_range(1.0, 12.0); let precision = rng.gen_range(0.1, 0.3); let offsetstrategy = 
rng.gen_range(0, 5); let mut routes = Vec::new(); let mut cave_spawned = false; let cave_threshold = rng.gen_range(0.5, 0.9) * height; let mut cave_initial_pos = Vec::new(); let w = rng.gen_range(0.05, 0.2); let mut base_y = height * 5.0; let mut miny = height; let stopy = rng.gen_range(0.2, 0.5) * height; let mut height_map: Vec<f64> = Vec::new(); loop { if miny < stopy { break; } if miny < cave_threshold && !cave_spawned { cave_spawned = true; let xfrom = (0.5 - w / 2.0) * width; let xto = (0.5 + w / 2.0) * width; let yamp = rng.gen_range(8.0, 24.0); let mut x = xfrom; let mut route = Vec::new(); loop { if x > xto { break; } let xi = (x / precision) as usize; let ybottom = height_map[xi].min(height - pad); let ytop = ybottom - yamp * (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs() + 0.6 * (1.0 - (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0))); height_map[xi] = ytop; cave_initial_pos.push((x, ytop)); if (route.len() / 2) % 2 == 0 { route.push((x, ybottom)); route.push((x, ytop)); } else { route.push((x, ytop)); route.push((x, ybottom)); } x += precision; } routes.push(route); } let mut route = Vec::new(); let mut x = pad; let mut was_outside = true; loop { if x > width - pad { break; } let xv = (4.0 - base_y / height) * (x - width / 2.); let amp = height * 0.05; let mut y = base_y; if offsetstrategy == 0 { y += amp * peakfactor * xv * xv; } y += -amp * perlin .get([ // xv * 0.005, y * 0.02, 77. + opts.seed / 7.3 + perlin.get([ // -opts.seed * 7.3, 8.3 + xv * 0.02, y * 0.1, ]), ]) .abs(); if offsetstrategy == 1 { y += amp * peakfactor * xv * xv; } y += amp2 * amp * perlin.get([ // 8.3 + xv * 0.01, 88.1 + y * ynoisefactor, opts.seed * 97.3, ]); if offsetstrategy == 2 { y += amp * peakfactor * xv * xv; } y += amp * perlin.get([ // opts.seed * 9.3 - 77., xv * 0.1, y * 0.5, ]) * perlin .get([ // xv * 0.02, 88.1 + y * 0.2, -opts.seed / 7.7, ]) .min(0.0); if offsetstrategy == 3 { y += amp * peakfactor * xv * xv; } y += 0.1 * amp * (1.0 - miny / height) * perlin.get([ // 66666. + opts.seed * 1.3, 88.3 + xv * 0.5, 88.1 + y * 0.5, ]); if offsetstrategy == 4 { y += amp * peakfactor * xv * xv; } if y < miny { miny = y; } let mut collides = false; let xi = (x / precision) as usize; if xi >= height_map.len() { height_map.push(y); } else { if y > height_map[xi] { collides = true; } else { height_map[xi] = y; } } let inside = !collides && pad < x && x < width - pad && pad < y && y < height - pad; if inside && passage.get((x, y)) < passage_threshold { if was_outside { if route.len() > min_route { routes.push(route); } route = Vec::new(); } was_outside = false; route.push((x, y)); passage.count((x, y)); } else { was_outside = true; } x += precision; } if route.len() > min_route { routes.push(route); } base_y -= yincr; } let radius = 6.0; passage.grow_passage(radius); rng.shuffle(&mut cave_initial_pos); let mut positions = Vec::new(); for i in 0..rng.gen_range(4, 12) { if i >= cave_initial_pos.len() - 1 { break; } let initial = cave_initial_pos[i]; let mut a = -PI / 2. 
+ rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0); let mut p = initial; let amp = 3.0; let pad = pad * 2.; loop { if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { break; } p = (p.0 + amp * a.cos(), p.1 + amp * a.sin()); positions.push(p); a += rng.gen_range(-0.5, 0.5) * rng.gen_range(0.0, 1.0); } } rng.shuffle(&mut positions); let disp = rng.gen_range(0.5, 3.0); let mut eagles = Vec::new(); for p in positions { if rng.gen_bool(0.2) { continue; } let scale = rng.gen_range(0.3, 0.5); let p = ( p.0 + disp * rng.gen_range(-1.0, 1.0), p.1 + disp * rng.gen_range(-1.0, 1.0), ); if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { continue; } eagles.push(eagle( p, scale, rng.gen_range(-1.0, 1.0), rng.gen_bool(0.5), &mut rng, )); } routes = vec![routes, eagles.concat()].concat(); for i in 0..10 { let d = i as f64 * 0.25; routes.push(vec![ (pad + d, pad + d), (pad + d, height - pad - d), (width - pad - d, height - pad - d), (width - pad - d, pad + d), (pad + d, pad + d), ]); } let color = "black"; let mut data = Data::new(); for route in routes.clone() { data = render_route(data, route); } let mut l = layer(color); l = l.add(base_path(color, 0.35, data)); vec![l] } fn main() { let opts: Opts = Opts::parse(); let groups = art(&opts); let mut document = base_document("white", opts.width, opts.height); for g in groups { document = document.add(g); } svg::save(opts.file, &document).unwrap(); } #[derive(Clone)] struct Passage { precision: f64, width: f64, height: f64, counters: Vec<usize>, } impl Passage { pub fn new(precision: f64, width: f64, height: f64) -> Self { let wi = (width / precision).ceil() as usize; let hi = (height / precision).ceil() as usize; let counters = vec![0; wi * hi]; Passage { precision, width, height, counters, } } fn index(self: &Self, (x, y): (f64, f64)) -> usize { let wi = (self.width / self.precision).ceil() as usize; let hi = (self.height / self.precision).ceil() as usize; let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1); let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1); yi * wi + xi } pub fn count(self: &mut Self, p: (f64, f64)) -> usize { let i = self.index(p); let v = self.counters[i] + 1; self.counters[i] = v; v } pub fn count_once(self: &mut Self, p: (f64, f64)) { let i = self.index(p); let v = self.counters[i]; if v == 0 { self.counters[i] = 1; } } pub fn get(self: &Self, p: (f64, f64)) -> usize { let i = self.index(p); self.counters[i] } pub fn grow_passage(self: &mut Self, radius: f64) { let precision = self.precision; let width = self.width; let height = self.height; let counters: Vec<usize> = self.counters.iter().cloned().collect(); let mut mask = Vec::new(); // TODO, in future for even better perf, I will rewrite this // working directly with index integers instead of having to use index() / count_once() let mut x = -radius; loop { if x >= radius { break; } let mut y = -radius; loop { if y >= radius { break; } if x * x + y * y < radius * radius { mask.push((x, y)); } y += precision; } x += precision; } let mut x = 0.0; loop { if x >= width { break; } let mut y = 0.0; loop { if y >= height { break; } let index = self.index((x, y)); if counters[index] > 0 { for &(dx, dy) in mask.iter() { self.count_once((x + dx, y + dy)); } } y += precision; } x += precision; } } } fn lerp_point(a: (f64, f64), b: (f64, f64), m: f64) -> (f64, f64) { (a.0 * (1. - m) + b.0 * m, a.1 * (1. 
- m) + b.1 * m) } fn path_subdivide_to_curve_it( path: Vec<(f64, f64)>, interpolation: f64, ) -> Vec<(f64, f64)> { let l = path.len(); if l < 3 { return path; } let mut route = Vec::new(); let mut first = path[0]; let mut last = path[l - 1]; let looped = euclidian_dist(first, last) < 0.1; if looped { first = lerp_point(path[1], first, interpolation); } route.push(first); for i in 1..(l - 1) { let p = path[i]; let p1 = lerp_point(path[i - 1], p, interpolation); let p2 = lerp_point(path[i + 1], p, interpolation); route.push(p1); route.push(p2); } if looped { last = lerp_point(path[l - 2], last, interpolation); } route.push(last); if looped { route.push(first); } route } fn path_subdivide_to_curve( path: Vec<(f64, f64)>, n: usize, interpolation: f64, ) -> Vec<(f64, f64)> { let mut route = path; for _i in 0..n { route = path_subdivide_to_curve_it(route, interpolation); } route }
Opts
identifier_name
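Near the end of the record above, `path_subdivide_to_curve_it` is a Chaikin-style corner cut built on `lerp_point`: each interior vertex is replaced by two points pulled toward its neighbours, and iterating the pass smooths the polyline toward a curve. A standalone sketch of one pass, under the hypothetical names `lerp` and `subdivide_once`:

```rust
// One pass of Chaikin-style corner cutting, as in path_subdivide_to_curve_it:
// each interior vertex becomes two points, one pulled toward each neighbour.
fn lerp(a: (f64, f64), b: (f64, f64), t: f64) -> (f64, f64) {
    (a.0 * (1.0 - t) + b.0 * t, a.1 * (1.0 - t) + b.1 * t)
}

fn subdivide_once(path: &[(f64, f64)], t: f64) -> Vec<(f64, f64)> {
    let l = path.len();
    if l < 3 {
        return path.to_vec();
    }
    let mut out = vec![path[0]];
    for i in 1..l - 1 {
        out.push(lerp(path[i - 1], path[i], t)); // nudged toward the previous vertex
        out.push(lerp(path[i + 1], path[i], t)); // nudged toward the next vertex
    }
    out.push(path[l - 1]);
    out
}

fn main() {
    let corner = vec![(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)];
    // one pass replaces the sharp corner with two gentler points
    println!("{:?}", subdivide_once(&corner, 0.8));
}
```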
main.rs
use std::f64::consts::PI; use clap::*; use gre::*; use noise::*; use rand::Rng; use svg::node::element::path::Data; use svg::node::element::*; #[derive(Parser)] #[clap()] pub struct Opts { #[clap(short, long, default_value = "image.svg")] file: String, #[clap(short, long, default_value = "100.0")] pub width: f64, #[clap(short, long, default_value = "150.0")] pub height: f64, #[clap(short, long, default_value = "5.0")] pub pad: f64, #[clap(short, long, default_value = "0.0")] pub seed: f64, #[clap(short, long, default_value = "0.0")] pub seed1: f64, #[clap(short, long, default_value = "0.0")] pub seed2: f64, #[clap(short, long, default_value = "0.0")] pub seed3: f64, } fn shake<R: Rng>( path: Vec<(f64, f64)>, scale: f64, rng: &mut R, ) -> Vec<(f64, f64)> { path .iter() .map(|&(x, y)| { let dx = rng.gen_range(-scale, scale); let dy = rng.gen_range(-scale, scale); (x + dx, y + dy) }) .collect() } fn eagle<R: Rng>( origin: (f64, f64), scale: f64, rotation: f64, xreverse: bool, rng: &mut R, ) -> Vec<Vec<(f64, f64)>> { let xmul = if xreverse { -1.0 } else { 1.0 }; let count = 2 + (scale * 3.0) as usize; let mut routes: Vec<Vec<(f64, f64)>> = Vec::new(); let shaking = scale * 0.1; // body let bodyw = 5.0; let bodyh = 1.5; let headcompression = rng.gen_range(0.1, 0.5); let headoff = rng.gen_range(0.1, 0.5); for i in 0..count { let yp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let ybase = bodyh * yp; let route = shake( path_subdivide_to_curve( vec![ (-rng.gen_range(0.4, 0.6) * bodyw, 1.5 * ybase), (-0.3 * bodyw, ybase), (0.2 * bodyw, ybase), (0.45 * bodyw, headcompression * ybase + headoff * bodyh), ], 1, 0.8, ), shaking, rng, ); routes.push(route); } let count = 2 + (scale * rng.gen_range(4.0, 6.0)) as usize; // wings let wingw = 1.4; let wingh = 8.0; let dx1 = rng.gen_range(-4.0, 4.0) * rng.gen_range(0.0, 1.0); let dx2 = if rng.gen_bool(0.8)
else { rng.gen_range(-3.0, 3.0) }; let spread1 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let spread2 = 1.0 + rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let offset1 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let offset2 = rng.gen_range(-1.0, 0.6) * rng.gen_range(0.0, 1.0); let interp = 0.5; let wing1m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2m = 1.0 - rng.gen_range(0.0, 0.5) * rng.gen_range(0.0, 1.0); let wing2up = rng.gen_bool(0.5); for i in 0..count { let xp = (i as f64 - (count - 1) as f64 * 0.5) / (count as f64); let xbase = wingw * xp; let wing1 = rng.gen_range(0.8, 1.1) * wing1m; let wing2 = rng.gen_range(0.8, 1.1) * wing2m * (if wing2up { -1.0 } else { 1.0 }); let route = shake( path_subdivide_to_curve( vec![ ( xbase * spread1 + dx1 + wingw * offset1, -wingh * 0.5 * wing1, ), (xbase + dx1 * interp, -wingh * 0.5 * interp * wing1), (xbase, 0.0), (xbase + dx2 * interp, wingh * 0.5 * interp * wing2), (xbase * spread2 + dx2 + wingw * offset2, wingh * 0.5 * wing2), ], 2, 0.8, ), shaking, rng, ); routes.push(route); } // scale, rotate & translate routes .iter() .map(|route| { route .iter() .map(|&p| { let p = p_r(p, rotation); (xmul * scale * p.0 + origin.0, scale * p.1 + origin.1) }) .collect() }) .collect() } fn art(opts: &Opts) -> Vec<Group> { let height = opts.height; let width = opts.width; let pad = opts.pad; let mut rng = rng_from_seed(opts.seed); let perlin = Perlin::new(); let mut passage = Passage::new(0.5, width, height); let passage_threshold = 5; let min_route = 2; let peakfactor = rng.gen_range(-0.001, 0.001) * rng.gen_range(0.0, 1.0) * rng.gen_range(0.0, 1.0); let ynoisefactor = rng.gen_range(0.02, 0.1); let yincr = rng.gen_range(1.5, 3.0); let amp2 = rng.gen_range(1.0, 12.0); let precision = rng.gen_range(0.1, 0.3); let offsetstrategy = rng.gen_range(0, 5); let mut routes = Vec::new(); let mut cave_spawned = false; let cave_threshold = rng.gen_range(0.5, 0.9) * height; let mut cave_initial_pos = Vec::new(); let w = rng.gen_range(0.05, 0.2); let mut base_y = height * 5.0; let mut miny = height; let stopy = rng.gen_range(0.2, 0.5) * height; let mut height_map: Vec<f64> = Vec::new(); loop { if miny < stopy { break; } if miny < cave_threshold && !cave_spawned { cave_spawned = true; let xfrom = (0.5 - w / 2.0) * width; let xto = (0.5 + w / 2.0) * width; let yamp = rng.gen_range(8.0, 24.0); let mut x = xfrom; let mut route = Vec::new(); loop { if x > xto { break; } let xi = (x / precision) as usize; let ybottom = height_map[xi].min(height - pad); let ytop = ybottom - yamp * (0.4 * perlin.get([opts.seed * 3.1 + 5.4, x * 0.01]).abs() + 0.6 * (1.0 - (((x - xfrom) / (xto - xfrom) - 0.5) * 2.0).powf(2.0))); height_map[xi] = ytop; cave_initial_pos.push((x, ytop)); if (route.len() / 2) % 2 == 0 { route.push((x, ybottom)); route.push((x, ytop)); } else { route.push((x, ytop)); route.push((x, ybottom)); } x += precision; } routes.push(route); } let mut route = Vec::new(); let mut x = pad; let mut was_outside = true; loop { if x > width - pad { break; } let xv = (4.0 - base_y / height) * (x - width / 2.); let amp = height * 0.05; let mut y = base_y; if offsetstrategy == 0 { y += amp * peakfactor * xv * xv; } y += -amp * perlin .get([ // xv * 0.005, y * 0.02, 77. 
+ opts.seed / 7.3 + perlin.get([ // -opts.seed * 7.3, 8.3 + xv * 0.02, y * 0.1, ]), ]) .abs(); if offsetstrategy == 1 { y += amp * peakfactor * xv * xv; } y += amp2 * amp * perlin.get([ // 8.3 + xv * 0.01, 88.1 + y * ynoisefactor, opts.seed * 97.3, ]); if offsetstrategy == 2 { y += amp * peakfactor * xv * xv; } y += amp * perlin.get([ // opts.seed * 9.3 - 77., xv * 0.1, y * 0.5, ]) * perlin .get([ // xv * 0.02, 88.1 + y * 0.2, -opts.seed / 7.7, ]) .min(0.0); if offsetstrategy == 3 { y += amp * peakfactor * xv * xv; } y += 0.1 * amp * (1.0 - miny / height) * perlin.get([ // 66666. + opts.seed * 1.3, 88.3 + xv * 0.5, 88.1 + y * 0.5, ]); if offsetstrategy == 4 { y += amp * peakfactor * xv * xv; } if y < miny { miny = y; } let mut collides = false; let xi = (x / precision) as usize; if xi >= height_map.len() { height_map.push(y); } else { if y > height_map[xi] { collides = true; } else { height_map[xi] = y; } } let inside = !collides && pad < x && x < width - pad && pad < y && y < height - pad; if inside && passage.get((x, y)) < passage_threshold { if was_outside { if route.len() > min_route { routes.push(route); } route = Vec::new(); } was_outside = false; route.push((x, y)); passage.count((x, y)); } else { was_outside = true; } x += precision; } if route.len() > min_route { routes.push(route); } base_y -= yincr; } let radius = 6.0; passage.grow_passage(radius); rng.shuffle(&mut cave_initial_pos); let mut positions = Vec::new(); for i in 0..rng.gen_range(4, 12) { if i >= cave_initial_pos.len() - 1 { break; } let initial = cave_initial_pos[i]; let mut a = -PI / 2. + rng.gen_range(-1.0, 1.0) * rng.gen_range(0.5, 1.0); let mut p = initial; let amp = 3.0; let pad = pad * 2.; loop { if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { break; } p = (p.0 + amp * a.cos(), p.1 + amp * a.sin()); positions.push(p); a += rng.gen_range(-0.5, 0.5) * rng.gen_range(0.0, 1.0); } } rng.shuffle(&mut positions); let disp = rng.gen_range(0.5, 3.0); let mut eagles = Vec::new(); for p in positions { if rng.gen_bool(0.2) { continue; } let scale = rng.gen_range(0.3, 0.5); let p = ( p.0 + disp * rng.gen_range(-1.0, 1.0), p.1 + disp * rng.gen_range(-1.0, 1.0), ); if p.0 < pad || p.0 > width - pad || p.1 < pad || p.1 > height - pad { continue; } eagles.push(eagle( p, scale, rng.gen_range(-1.0, 1.0), rng.gen_bool(0.5), &mut rng, )); } routes = vec![routes, eagles.concat()].concat(); for i in 0..10 { let d = i as f64 * 0.25; routes.push(vec![ (pad + d, pad + d), (pad + d, height - pad - d), (width - pad - d, height - pad - d), (width - pad - d, pad + d), (pad + d, pad + d), ]); } let color = "black"; let mut data = Data::new(); for route in routes.clone() { data = render_route(data, route); } let mut l = layer(color); l = l.add(base_path(color, 0.35, data)); vec![l] } fn main() { let opts: Opts = Opts::parse(); let groups = art(&opts); let mut document = base_document("white", opts.width, opts.height); for g in groups { document = document.add(g); } svg::save(opts.file, &document).unwrap(); } #[derive(Clone)] struct Passage { precision: f64, width: f64, height: f64, counters: Vec<usize>, } impl Passage { pub fn new(precision: f64, width: f64, height: f64) -> Self { let wi = (width / precision).ceil() as usize; let hi = (height / precision).ceil() as usize; let counters = vec![0; wi * hi]; Passage { precision, width, height, counters, } } fn index(self: &Self, (x, y): (f64, f64)) -> usize { let wi = (self.width / self.precision).ceil() as usize; let hi = (self.height / self.precision).ceil() as usize; let 
xi = ((x / self.precision).round() as usize).max(0).min(wi - 1); let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1); yi * wi + xi } pub fn count(self: &mut Self, p: (f64, f64)) -> usize { let i = self.index(p); let v = self.counters[i] + 1; self.counters[i] = v; v } pub fn count_once(self: &mut Self, p: (f64, f64)) { let i = self.index(p); let v = self.counters[i]; if v == 0 { self.counters[i] = 1; } } pub fn get(self: &Self, p: (f64, f64)) -> usize { let i = self.index(p); self.counters[i] } pub fn grow_passage(self: &mut Self, radius: f64) { let precision = self.precision; let width = self.width; let height = self.height; let counters: Vec<usize> = self.counters.iter().cloned().collect(); let mut mask = Vec::new(); // TODO, in future for even better perf, I will rewrite this // working directly with index integers instead of having to use index() / count_once() let mut x = -radius; loop { if x >= radius { break; } let mut y = -radius; loop { if y >= radius { break; } if x * x + y * y < radius * radius { mask.push((x, y)); } y += precision; } x += precision; } let mut x = 0.0; loop { if x >= width { break; } let mut y = 0.0; loop { if y >= height { break; } let index = self.index((x, y)); if counters[index] > 0 { for &(dx, dy) in mask.iter() { self.count_once((x + dx, y + dy)); } } y += precision; } x += precision; } } } fn lerp_point(a: (f64, f64), b: (f64, f64), m: f64) -> (f64, f64) { (a.0 * (1. - m) + b.0 * m, a.1 * (1. - m) + b.1 * m) } fn path_subdivide_to_curve_it( path: Vec<(f64, f64)>, interpolation: f64, ) -> Vec<(f64, f64)> { let l = path.len(); if l < 3 { return path; } let mut route = Vec::new(); let mut first = path[0]; let mut last = path[l - 1]; let looped = euclidian_dist(first, last) < 0.1; if looped { first = lerp_point(path[1], first, interpolation); } route.push(first); for i in 1..(l - 1) { let p = path[i]; let p1 = lerp_point(path[i - 1], p, interpolation); let p2 = lerp_point(path[i + 1], p, interpolation); route.push(p1); route.push(p2); } if looped { last = lerp_point(path[l - 2], last, interpolation); } route.push(last); if looped { route.push(first); } route } fn path_subdivide_to_curve( path: Vec<(f64, f64)>, n: usize, interpolation: f64, ) -> Vec<(f64, f64)> { let mut route = path; for _i in 0..n { route = path_subdivide_to_curve_it(route, interpolation); } route }
{ -dx1 }
conditional_block
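The `conditional_block` middle above is the `{ -dx1 }` branch: with probability 0.8 the second wing's horizontal offset mirrors the first, giving a roughly symmetric bird, and otherwise it is drawn independently. A tiny sketch of that decision, with `coin` and `draw` as hypothetical stand-ins for `rng.gen_bool(0.8)` and `rng.gen_range(-3.0, 3.0)`:

```rust
// The wing-offset decision: mirror the first offset most of the time,
// otherwise use an independently drawn one. `coin` and `draw` are
// hypothetical stand-ins for rng.gen_bool(0.8) and rng.gen_range(-3.0, 3.0).
fn wing_offsets(dx1: f64, coin: bool, draw: f64) -> (f64, f64) {
    let dx2 = if coin { -dx1 } else { draw };
    (dx1, dx2)
}

fn main() {
    assert_eq!(wing_offsets(2.5, true, 0.0), (2.5, -2.5)); // mirrored: symmetric wings
    assert_eq!(wing_offsets(2.5, false, 1.2), (2.5, 1.2)); // independent offset
    println!("ok");
}
```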
leetcode.rs
//! The common data structure definition for leetcode problems. /** The definition of `ListNode`, used by many problems. */ #[derive(PartialEq, Eq, Debug)] pub(crate) struct ListNode { val: i32, next: Option<Box<Self>>, } impl ListNode { #[inline] fn new(val: i32) -> Self { Self { next: None, val } } } trait ToListNode { fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>; } impl ToListNode for i32 { /// Convert a number to the list of every digit of the number. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut vec = vec![]; while self / 10 > 0 { vec.push(self % 10); self /= 10; } vec.push(self % 10); // the sequence of the vec is the opposite of the number vec.to_list_node(!reverse) } } impl ToListNode for Vec<i32> { /// Build a list node from the vector of numbers. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut next = None; if !reverse { self.reverse(); } for val in self { next = Some(Box::new(ListNode { val, next })); } next } } trait ToVec { fn to_num_vec(self) -> Vec<i32>; fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>; } impl ToVec for Option<Box<ListNode>> { /// Build the vector of numbers from a list node. fn to_num_vec(self) -> Vec<i32> { let (mut vec, mut temp) = (vec![], &self); while let Some(n) = temp { vec.push(n.val); temp = &n.next; } vec } /// Build the vector of nodes from a list node. fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> { let (mut vec, mut current) = (vec![], self); while let Some(v) = current.as_mut() { // use Option::take() to take the value out of the Option, and then leave a None in its place. // let node = std::mem::replace(&mut v.next, None); let node = v.next.take(); vec.push(current); current = node; } vec } } use std::{cell::RefCell, rc::Rc}; /// The definition of a binary tree node (`TreeNode`), used by many problems. #[derive(Debug, PartialEq, Eq)] pub(crate) struct
{ val: i32, left: Option<Rc<RefCell<Self>>>, right: Option<Rc<RefCell<Self>>>, } impl TreeNode { #[inline] fn new(val: i32) -> Self { TreeNode { val, left: None, right: None, } } #[inline] fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> { val.map(|v| Rc::new(RefCell::new(Self::new(v)))) } /** Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node. For example: `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / 4 5 6 ``` `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / \ 4 N 5 N / 6 ``` `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to: ```html 7 / \ 5 11 / \ / \ 4 N 8 13 / \ / \ / 2 N N N 12 ``` */ fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> { use std::collections::VecDeque; let mut root = None; // save the root node let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointers to child nodes for v in vec { // use the macro to deal with child nodes macro_rules! update { ($node: expr) => { if let Some(n) = &*$node { // add the pointers of the child nodes, use raw pointers to avoid the ownership check // saving the raw pointer of a child node of a new tree node doesn't need UNSAFE nodes.push_back(&mut n.borrow_mut().left); nodes.push_back(&mut n.borrow_mut().right); } }; } let node = Self::new_option(v); // new tree node if root.is_none() { root = node; update!(&root); } else if let Some(current) = nodes.pop_front() { unsafe { // only dereferencing the raw pointer should be under UNSAFE *current = node; update!(current); } } } root } } /// For `q15` and `q18`, check if the target is included in the **vec_list**. fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool { for old_vec in vec_list { let mut new_vec = target.clone(); for old_val in old_vec { for i in 0..new_vec.len() { // check if the target vec has an equal element in old_vec if old_val == &new_vec[i] { new_vec.remove(i); break; } } } // if all elements have been removed, it means the vec is a duplicate if new_vec.is_empty() { return true; } } false } /// For `q126` and `q127`, check if two words differ by only one character. fn check_diff_one_char(old_word: &String, new_word: &String) -> bool { let mut count = 0; let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref()); for i in 0..old_u8s.len() { if old_u8s[i] != new_u8s[i] { count += 1; if count > 1 { return false; } } } count == 1 } /// Check element content equivalence without element order. fn check_element_eq<T>(v1: T, v2: T) -> bool where T: IntoIterator, T::Item: Eq + std::hash::Hash + std::fmt::Debug, { use std::collections::HashMap; let (mut length1, mut length2) = (0, 0); let (mut content1, mut content2) = (HashMap::new(), HashMap::new()); for v in v1 { length1 += 1; *content1.entry(v).or_insert(0) += 1; } for v in v2 { length2 += 1; *content2.entry(v).or_insert(0) += 1; } let eq = content1 == content2 && length1 == length2; if !eq { println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}"); println!("Content 1: {content1:?}\nContent 2: {content2:?}"); } eq } /** Unlike everything else in the language, macros will remain visible in sub-modules. Also, unlike everything else in the language, macros are only accessible after their definition. Or use `#[macro_export]` to export the macro, then use the macro with code "crate::xxx_macro_name!".
*/ macro_rules! string_vec { ($($content:expr),*) => {{ let mut temp = vec![]; $(temp.push($content.to_string());)* temp }} } /// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode. macro_rules! build_tree_node { () => { None }; // macro matcher type 'tt' means "a single token tree", // which allows an independent sub token tree for other macro usage, // as of the current rust version (1.58), // only a positive number or zero will be treated as a single token, // a negative number won't be treated as one ($($t:tt),*) => {{ let mut temp = vec![]; $(temp.push(covert_tree_node!($t));)* TreeNode::from(temp) }}; } // Use the macro to transform the input content. macro_rules! covert_tree_node { (null) => { None }; ($l:literal) => { Some($l) }; } // normal problems mod q1008_construct_binary_search_tree_from_preorder_traversal; mod q102_binary_tree_level_order_traversal; mod q103_binary_tree_zipzag_level_order_traversal; mod q107_binary_tree_level_order_traversal_ii; mod q10_regular_expression_matching; mod q11_container_with_most_water; mod q126_word_ladder_ii; mod q127_word_ladder; mod q12_integer_to_roman; mod q16_three_sum_closest; mod q17_letter_combinations_of_a_phone_number; mod q18_four_sum; mod q19_remove_nth_node_from_end_of_list; mod q200_number_of_islands; mod q208_implement_trie; mod q212_word_search_ii; mod q22_generate_parentheses; mod q23_merge_k_sorted_lists; mod q24_swap_nodes_in_pairs; mod q25_reverse_nodes_in_k_group; mod q29_divide_two_integers; mod q2_add_two_numbers; mod q30_substring_with_concatenation_of_all_words; mod q31_next_permutation; mod q32_longest_valid_parentheses; mod q33_search_in_rotated_sorted_array; mod q34_find_first_and_last_position_of_element_in_sorted_array; mod q35_valid_sudoku; mod q37_sudoku_solver; mod q39_combination_sum; mod q3_length_of_longest_substring; mod q407_trapping_rain_water_ii; mod q40_combination_sum_ii; mod q41_first_missing_positive; mod q42_trapping_rain_water; mod q43_multiply_strings; mod q44_wildcard_matching; mod q454_four_sum_ii; mod q45_jump_game_ii; mod q46_permutations; mod q47_permutations_ii; mod q48_rotate_image; mod q49_group_anagrams; mod q4_find_median_sorted_arrays; mod q50_pow_x_n; mod q51_n_queens; mod q525_contiguous_array; mod q52_n_queens_ii; mod q53_maximum_subarray; mod q543_diameter_of_binary_tree; mod q54_spiral_matrix; mod q55_jump_game; mod q56_merge_intervals; mod q57_insert_interval; mod q59_spiral_matrix_ii; mod q5_longest_palindrome; mod q60_permutation_sequence; mod q61_rotate_list; mod q62_unique_paths; mod q63_unique_paths_ii; mod q64_minimum_path_sum; mod q65_valid_number; mod q68_text_justification; mod q6_zipzag_conversion; mod q71_simplify_path; mod q72_edit_distance; mod q73_set_matrix_zeroes; mod q74_search_a_2d_matrix; mod q75_sort_colors; mod q76_minimum_window_substring; mod q77_combinations; mod q78_subsets; mod q79_word_search; mod q7_reverse_integer; mod q80_remove_duplicates_from_sorted_array_ii; mod q81_search_in_rotated_sorted_array_ii; mod q82_remove_duplicates_from_sorted_list_ii; mod q844_backspace_string_compare; mod q84_largest_rectangle_in_histogram; mod q85_maximal_rectangle; mod q86_partition_list; mod q87_scramble_string; mod q89_gray_code; mod q8_my_atoi; mod q90_subsets_ii; mod q91_decode_ways; mod q92_reverse_linked_list_ii; mod q93_restore_ip_addresses; mod q94_binary_tree_inorder_traversal; mod q95_unique_binary_search_trees_ii; mod q96_unique_binary_search_trees; mod q97_interleaving_string; mod q98_validate_binary_search_tree; mod
q99_recover_binary_search_tree; // some extra problems can only be found in "30-Day LeetCoding Challenge" mod day_30_leetcoding_challenge; // mod q834_sum_of_distances_in_tree; // DNF // mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF // mod q814_binary_tree_pruning; // mod q173_binary_search_tree_iterator; // mod q958_check_completeness_of_a_binary_tree; // mod q639_decode_ways_ii; // need explain // mod q124_binary_tree_maximum_path_sum; // mod q221_maximal_square; // mod q1143_longest_common_subsequence; // mod q146_lru_cache; // mod q201_bitwise_and_of_numbers_range; // mod q560_subarray_sum_equals_k; // mod q678_valid_parenthesis_string; // mod q238_product_of_array_except_self; // mod q1046_last_stone_weight; // mod q155_min_stack; // mod q876_middle_of_the_linked_list; // mod q122_best_time_to_buy_and_sell_stock_ii; // mod q283_move_zeroes; // mod q136_single_number; // mod q202_happy_number; // mod q328_odd_even_linked_list; // mod q725_split_linked_list; // mod q885_spiral_matrix_iii; // mod q143_recoder_list; // mod q216_combination_sum_iii; // mod q377_combination_sum_iv;
TreeNode
identifier_name
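`TreeNode::from` in the record above fills children in level order by queuing raw `*mut` pointers to the empty child slots. The same `Vec<Option<i32>>` convention can be handled in safe Rust by queuing the parent nodes instead and attaching left/right as the input is consumed; the sketch below is an alternative illustration under a hypothetical `build` name, not the record's implementation.

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

#[derive(Debug)]
struct TreeNode {
    val: i32,
    left: Option<Rc<RefCell<TreeNode>>>,
    right: Option<Rc<RefCell<TreeNode>>>,
}

fn new_node(val: i32) -> Rc<RefCell<TreeNode>> {
    Rc::new(RefCell::new(TreeNode { val, left: None, right: None }))
}

// Safe-Rust level-order builder for the same Vec<Option<i32>> convention:
// instead of queuing raw pointers to empty child slots, queue the parents
// and attach left/right as the input is consumed.
fn build(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<TreeNode>>> {
    let mut it = vec.into_iter();
    let root = new_node(it.next().flatten()?);
    let mut queue = VecDeque::new();
    queue.push_back(root.clone());
    while let Some(parent) = queue.pop_front() {
        for side in 0..2 {
            match it.next() {
                Some(Some(v)) => {
                    let child = new_node(v);
                    if side == 0 {
                        parent.borrow_mut().left = Some(child.clone());
                    } else {
                        parent.borrow_mut().right = Some(child.clone());
                    }
                    queue.push_back(child); // this child may receive children later
                }
                Some(None) => {} // explicit empty slot: attach nothing
                None => return Some(root), // input exhausted
            }
        }
    }
    Some(root)
}

fn main() {
    // same shape as the record's first doc example, truncated
    let t = build(vec![Some(1), Some(2), Some(3), Some(4), None, Some(5)]);
    println!("{:#?}", t);
}
```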
leetcode.rs
//! The common data structure definition for leetcode problems. /** The definition of `ListNode`, used by many problems. */ #[derive(PartialEq, Eq, Debug)] pub(crate) struct ListNode { val: i32, next: Option<Box<Self>>, } impl ListNode { #[inline] fn new(val: i32) -> Self { Self { next: None, val } } } trait ToListNode { fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>; } impl ToListNode for i32 { /// Convert a number to the list of every digit of the number. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut vec = vec![]; while self / 10 > 0 { vec.push(self % 10); self /= 10; } vec.push(self % 10); // the sequence of the vec is the opposite of the number vec.to_list_node(!reverse) } } impl ToListNode for Vec<i32> { /// Build a list node from the vector of numbers. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut next = None; if !reverse { self.reverse(); } for val in self { next = Some(Box::new(ListNode { val, next })); } next } } trait ToVec { fn to_num_vec(self) -> Vec<i32>; fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>; } impl ToVec for Option<Box<ListNode>> { /// Build the vector of numbers from a list node. fn to_num_vec(self) -> Vec<i32> { let (mut vec, mut temp) = (vec![], &self); while let Some(n) = temp { vec.push(n.val); temp = &n.next; } vec } /// Build the vector of nodes from a list node. fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> { let (mut vec, mut current) = (vec![], self); while let Some(v) = current.as_mut() { // use Option::take() to take the value out of the Option, and then leave a None in its place. // let node = std::mem::replace(&mut v.next, None); let node = v.next.take(); vec.push(current); current = node; } vec } } use std::{cell::RefCell, rc::Rc}; /// The definition of a binary tree node (`TreeNode`), used by many problems. #[derive(Debug, PartialEq, Eq)] pub(crate) struct TreeNode { val: i32, left: Option<Rc<RefCell<Self>>>, right: Option<Rc<RefCell<Self>>>, } impl TreeNode { #[inline] fn new(val: i32) -> Self { TreeNode { val, left: None, right: None, } } #[inline] fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> { val.map(|v| Rc::new(RefCell::new(Self::new(v)))) } /** Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node. For example: `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / 4 5 6 ``` `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / \ 4 N 5 N / 6 ``` `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to: ```html 7 / \ 5 11 / \ / \ 4 N 8 13 / \ / \ / 2 N N N 12 ``` */ fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> { use std::collections::VecDeque; let mut root = None; // save the root node let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointers to child nodes for v in vec { // use the macro to deal with child nodes macro_rules!
update { ($node: expr) => { if let Some(n) = &*$node { // add the pointers of the child nodes, use raw pointers to avoid the ownership check // saving the raw pointer of a child node of a new tree node doesn't need UNSAFE nodes.push_back(&mut n.borrow_mut().left); nodes.push_back(&mut n.borrow_mut().right); } }; } let node = Self::new_option(v); // new tree node if root.is_none() { root = node; update!(&root); } else if let Some(current) = nodes.pop_front() { unsafe { // only dereferencing the raw pointer should be under UNSAFE *current = node; update!(current); } } } root } } /// For `q15` and `q18`, check if the target is included in the **vec_list**. fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool { for old_vec in vec_list { let mut new_vec = target.clone(); for old_val in old_vec { for i in 0..new_vec.len() { // check if the target vec has an equal element in old_vec if old_val == &new_vec[i] { new_vec.remove(i); break; } } } // if all elements have been removed, it means the vec is a duplicate if new_vec.is_empty() { return true; } } false } /// For `q126` and `q127`, check if two words differ by only one character. fn check_diff_one_char(old_word: &String, new_word: &String) -> bool { let mut count = 0; let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref()); for i in 0..old_u8s.len() { if old_u8s[i] != new_u8s[i] { count += 1; if count > 1
} } count == 1 } /// Check element content equivalence without element order. fn check_element_eq<T>(v1: T, v2: T) -> bool where T: IntoIterator, T::Item: Eq + std::hash::Hash + std::fmt::Debug, { use std::collections::HashMap; let (mut length1, mut length2) = (0, 0); let (mut content1, mut content2) = (HashMap::new(), HashMap::new()); for v in v1 { length1 += 1; *content1.entry(v).or_insert(0) += 1; } for v in v2 { length2 += 1; *content2.entry(v).or_insert(0) += 1; } let eq = content1 == content2 && length1 == length2; if !eq { println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}"); println!("Content 1: {content1:?}\nContent 2: {content2:?}"); } eq } /** Unlike everything else in the language, macros will remain visible in sub-modules. Also, unlike everything else in the language, macros are only accessible after their definition. Or use `#[macro_export]` to export the macro, then use the macro with code "crate::xxx_macro_name!". */ macro_rules! string_vec { ($($content:expr),*) => {{ let mut temp = vec![]; $(temp.push($content.to_string());)* temp }} } /// Provide a macro to build TreeNode which can directly use the test case syntax in LeetCode. macro_rules! build_tree_node { () => { None }; // macro matcher type 'tt' means "a single token tree", // which allows an independent sub token tree for other macro usage, // as of the current rust version (1.58), // only a positive number or zero will be treated as a single token, // a negative number won't be treated as one ($($t:tt),*) => {{ let mut temp = vec![]; $(temp.push(covert_tree_node!($t));)* TreeNode::from(temp) }}; } // Use the macro to transform the input content. macro_rules! covert_tree_node { (null) => { None }; ($l:literal) => { Some($l) }; } // normal problems mod q1008_construct_binary_search_tree_from_preorder_traversal; mod q102_binary_tree_level_order_traversal; mod q103_binary_tree_zipzag_level_order_traversal; mod q107_binary_tree_level_order_traversal_ii; mod q10_regular_expression_matching; mod q11_container_with_most_water; mod q126_word_ladder_ii; mod q127_word_ladder; mod q12_integer_to_roman; mod q16_three_sum_closest; mod q17_letter_combinations_of_a_phone_number; mod q18_four_sum; mod q19_remove_nth_node_from_end_of_list; mod q200_number_of_islands; mod q208_implement_trie; mod q212_word_search_ii; mod q22_generate_parentheses; mod q23_merge_k_sorted_lists; mod q24_swap_nodes_in_pairs; mod q25_reverse_nodes_in_k_group; mod q29_divide_two_integers; mod q2_add_two_numbers; mod q30_substring_with_concatenation_of_all_words; mod q31_next_permutation; mod q32_longest_valid_parentheses; mod q33_search_in_rotated_sorted_array; mod q34_find_first_and_last_position_of_element_in_sorted_array; mod q35_valid_sudoku; mod q37_sudoku_solver; mod q39_combination_sum; mod q3_length_of_longest_substring; mod q407_trapping_rain_water_ii; mod q40_combination_sum_ii; mod q41_first_missing_positive; mod q42_trapping_rain_water; mod q43_multiply_strings; mod q44_wildcard_matching; mod q454_four_sum_ii; mod q45_jump_game_ii; mod q46_permutations; mod q47_permutations_ii; mod q48_rotate_image; mod q49_group_anagrams; mod q4_find_median_sorted_arrays; mod q50_pow_x_n; mod q51_n_queens; mod q525_contiguous_array; mod q52_n_queens_ii; mod q53_maximum_subarray; mod q543_diameter_of_binary_tree; mod q54_spiral_matrix; mod q55_jump_game; mod q56_merge_intervals; mod q57_insert_interval; mod q59_spiral_matrix_ii; mod q5_longest_palindrome; mod q60_permutation_sequence; mod q61_rotate_list; mod q62_unique_paths; mod
q63_unique_paths_ii; mod q64_minimum_path_sum; mod q65_valid_number; mod q68_text_justification; mod q6_zipzag_conversion; mod q71_simplify_path; mod q72_edit_distance; mod q73_set_matrix_zeroes; mod q74_search_a_2d_matrix; mod q75_sort_colors; mod q76_minimum_window_substring; mod q77_combinations; mod q78_subsets; mod q79_word_search; mod q7_reverse_integer; mod q80_remove_duplicates_from_sorted_array_ii; mod q81_search_in_rotated_sorted_array_ii; mod q82_remove_duplicates_from_sorted_list_ii; mod q844_backspace_string_compare; mod q84_largest_rectangle_in_histogram; mod q85_maximal_rectangle; mod q86_partition_list; mod q87_scramble_string; mod q89_gray_code; mod q8_my_atoi; mod q90_subsets_ii; mod q91_decode_ways; mod q92_reverse_linked_list_ii; mod q93_restore_ip_addresses; mod q94_binary_tree_inorder_traversal; mod q95_unique_binary_search_trees_ii; mod q96_unique_binary_search_trees; mod q97_interleaving_string; mod q98_validate_binary_search_tree; mod q99_recover_binary_search_tree; // some extra problems can only be found in "30-Day LeetCoding Challenge" mod day_30_leetcoding_challenge; // mod q834_sum_of_distances_in_tree; // DNF // mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF // mod q814_binary_tree_pruning; // mod q173_binary_search_tree_iterator; // mod q958_check_completeness_of_a_binary_tree; // mod q639_decode_ways_ii; // need explain // mod q124_binary_tree_maximum_path_sum; // mod q221_maximal_square; // mod q1143_longest_common_subsequence; // mod q146_lru_cache; // mod q201_bitwise_and_of_numbers_range; // mod q560_subarray_sum_equals_k; // mod q678_valid_parenthesis_string; // mod q238_product_of_array_except_self; // mod q1046_last_stone_weight; // mod q155_min_stack; // mod q876_middle_of_the_linked_list; // mod q122_best_time_to_buy_and_sell_stock_ii; // mod q283_move_zeroes; // mod q136_single_number; // mod q202_happy_number; // mod q328_odd_even_linked_list; // mod q725_split_linked_list; // mod q885_spiral_matrix_iii; // mod q143_recoder_list; // mod q216_combination_sum_iii; // mod q377_combination_sum_iv;
{ return false; }
conditional_block
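The `{ return false; }` middle above is the early exit in `check_diff_one_char`: once a second mismatch is seen, the words cannot differ by exactly one character. For equal-length words this is just a Hamming-distance-of-1 test, which iterator adapters express compactly; `diff_one_char` below is a hypothetical equivalent that keeps the early exit via `take(2)`.

```rust
// Hamming-distance-of-1 check, equivalent to check_diff_one_char for
// equal-length words; take(2) keeps the original's early exit on the
// second mismatch. `diff_one_char` is a hypothetical name.
fn diff_one_char(a: &str, b: &str) -> bool {
    a.bytes().zip(b.bytes()).filter(|(x, y)| x != y).take(2).count() == 1
}

fn main() {
    assert!(diff_one_char("hot", "dot"));  // exactly one substitution
    assert!(!diff_one_char("hot", "hot")); // zero differences
    assert!(!diff_one_char("hot", "dog")); // two differences
    println!("ok");
}
```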
leetcode.rs
//! The common data structure definition for leetcode problems. /** The definition of `ListNode`, used by many problems. */ #[derive(PartialEq, Eq, Debug)] pub(crate) struct ListNode { val: i32, next: Option<Box<Self>>, } impl ListNode { #[inline] fn new(val: i32) -> Self { Self { next: None, val } } } trait ToListNode { fn to_list_node(self, reverse: bool) -> Option<Box<ListNode>>; } impl ToListNode for i32 { /// Convert a number to the list of every digit of the number. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut vec = vec![]; while self / 10 > 0 { vec.push(self % 10); self /= 10; } vec.push(self % 10); // the sequence of the vec is the opposite of the number vec.to_list_node(!reverse) } } impl ToListNode for Vec<i32> { /// Build a list node from the vector of numbers. fn to_list_node(mut self, reverse: bool) -> Option<Box<ListNode>> { let mut next = None; if !reverse { self.reverse(); } for val in self { next = Some(Box::new(ListNode { val, next })); } next } } trait ToVec { fn to_num_vec(self) -> Vec<i32>; fn to_node_vec(self) -> Vec<Option<Box<ListNode>>>; } impl ToVec for Option<Box<ListNode>> { /// Build the vector of numbers from a list node. fn to_num_vec(self) -> Vec<i32> { let (mut vec, mut temp) = (vec![], &self); while let Some(n) = temp { vec.push(n.val); temp = &n.next; } vec } /// Build the vector of nodes from a list node. fn to_node_vec(self) -> Vec<Option<Box<ListNode>>> { let (mut vec, mut current) = (vec![], self); while let Some(v) = current.as_mut() { // use Option::take() to take the value out of the Option, and then leave a None in its place. // let node = std::mem::replace(&mut v.next, None); let node = v.next.take(); vec.push(current); current = node; } vec } } use std::{cell::RefCell, rc::Rc}; /// The definition of a binary tree node (`TreeNode`), used by many problems. #[derive(Debug, PartialEq, Eq)] pub(crate) struct TreeNode { val: i32, left: Option<Rc<RefCell<Self>>>, right: Option<Rc<RefCell<Self>>>, } impl TreeNode { #[inline] fn new(val: i32) -> Self { TreeNode { val, left: None, right: None, } } #[inline] fn new_option(val: Option<i32>) -> Option<Rc<RefCell<Self>>> { val.map(|v| Rc::new(RefCell::new(Self::new(v)))) } /** Building binary tree from `Vec<Option<i32>>`, Some means valued node, None means empty node. For example: `[Some(1), Some(2), Some(3), Some(4), Some(5), Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / 4 5 6 ``` `[Some(1), Some(2), Some(3), Some(4), None, Some(5), None, Some(6)]` will be transformed to: ```html 1 / \ 2 3 / \ / \ 4 N 5 N / 6 ``` `[Some(7), Some(5), Some(11), Some(4), None, Some(8), Some(13), Some(2), None, None, None, Some(12)]` will be transformed to: ```html 7 / \ 5 11 / \ / \ 4 N 8 13 / \ / \ / 2 N N N 12 ``` */ fn from(vec: Vec<Option<i32>>) -> Option<Rc<RefCell<Self>>> { use std::collections::VecDeque; let mut root = None; // save the root node let mut nodes: VecDeque<*mut Option<Rc<RefCell<Self>>>> = Default::default(); // save the pointers to child nodes for v in vec { // use the macro to deal with child nodes macro_rules!
update { ($node: expr) => { if let Some(n) = &*$node { // add the pointers to the child slots; raw pointers avoid the ownership check // saving the raw pointers to a new tree node's child slots doesn't need unsafe nodes.push_back(&mut n.borrow_mut().left); nodes.push_back(&mut n.borrow_mut().right); } }; } let node = Self::new_option(v); // new tree node if root.is_none() { root = node; update!(&root); } else if let Some(current) = nodes.pop_front() { unsafe { // only dereferencing the raw pointer has to be inside unsafe *current = node; update!(current); } } } root } } /// For `q15` and `q18`, check if the target is included in the **vec_list**. fn check_vecs_contain_target(vec_list: &Vec<Vec<i32>>, target: &Vec<i32>) -> bool { for old_vec in vec_list { let mut new_vec = target.clone(); for old_val in old_vec { for i in 0..new_vec.len() { // check whether the target vec has an element equal to one in old_vec if old_val == &new_vec[i] { new_vec.remove(i); break; } } } // if all elements have been removed, the vec is a duplicate if new_vec.is_empty() { return true; } } false } /// For `q126` and `q127`, check if two words differ by only one character. fn check_diff_one_char(old_word: &String, new_word: &String) -> bool { let mut count = 0; let (old_u8s, new_u8s): (&[u8], &[u8]) = (old_word.as_ref(), new_word.as_ref()); for i in 0..old_u8s.len() { if old_u8s[i] != new_u8s[i] { count += 1; if count > 1 { return false; } } } count == 1 } /// Check element-content equivalence, ignoring element order. fn check_element_eq<T>(v1: T, v2: T) -> bool where T: IntoIterator, T::Item: Eq + std::hash::Hash + std::fmt::Debug, { use std::collections::HashMap; let (mut length1, mut length2) = (0, 0); let (mut content1, mut content2) = (HashMap::new(), HashMap::new()); for v in v1 { length1 += 1; *content1.entry(v).or_insert(0) += 1; } for v in v2 { length2 += 1; *content2.entry(v).or_insert(0) += 1; } let eq = content1 == content2 && length1 == length2; if !eq { println!("Elements are different!\nLength 1: {length1}, Length 2: {length2}"); println!("Content 1: {content1:?}\nContent 2: {content2:?}"); } eq } /** Unlike everything else in the language, macros remain visible in sub-modules. Also, unlike everything else in the language, macros are only accessible after their definition. Alternatively, use `#[macro_export]` to export the macro, then invoke it as `crate::xxx_macro_name!`. */ macro_rules! string_vec { ($($content:expr),*) => {{ let mut temp = vec![]; $(temp.push($content.to_string());)* temp }} } /// A macro to build a TreeNode tree directly from LeetCode's test-case syntax. macro_rules! build_tree_node { () => { None }; // the macro matcher type 'tt' means "a single token tree", // which allows passing an independent sub token tree to another macro; // as of the current Rust version (1.58), // only a positive number or zero is treated as a single token, // a negative number is not ($($t:tt),*) => {{ let mut temp = vec![]; $(temp.push(covert_tree_node!($t));)* TreeNode::from(temp) }}; } // Use this macro to transform each input token (null or a literal) into an `Option<i32>`. macro_rules!
covert_tree_node { (null) => { None }; ($l:literal) => { Some($l) }; } // normal problems mod q1008_construct_binary_search_tree_from_preorder_traversal; mod q102_binary_tree_level_order_traversal; mod q103_binary_tree_zipzag_level_order_traversal; mod q107_binary_tree_level_order_traversal_ii; mod q10_regular_expression_matching; mod q11_container_with_most_water; mod q126_word_ladder_ii; mod q127_word_ladder; mod q12_integer_to_roman; mod q16_three_sum_closest; mod q17_letter_combinations_of_a_phone_number; mod q18_four_sum; mod q19_remove_nth_node_from_end_of_list; mod q200_number_of_islands; mod q208_implement_trie; mod q212_word_search_ii; mod q22_generate_parentheses; mod q23_merge_k_sorted_lists; mod q24_swap_nodes_in_pairs; mod q25_reverse_nodes_in_k_group; mod q29_divide_two_integers; mod q2_add_two_numbers; mod q30_substring_with_concatenation_of_all_words; mod q31_next_permutation; mod q32_longest_valid_parentheses; mod q33_search_in_rotated_sorted_array; mod q34_find_first_and_last_position_of_element_in_sorted_array; mod q35_valid_sudoku; mod q37_sudoku_solver; mod q39_combination_sum; mod q3_length_of_longest_substring; mod q407_trapping_rain_water_ii; mod q40_combination_sum_ii; mod q41_first_missing_positive; mod q42_trapping_rain_water; mod q43_multiply_strings; mod q44_wildcard_matching; mod q454_four_sum_ii; mod q45_jump_game_ii; mod q46_permutations; mod q47_permutations_ii; mod q48_rotate_image; mod q49_group_anagrams; mod q4_find_median_sorted_arrays; mod q50_pow_x_n; mod q51_n_queens; mod q525_contiguous_array; mod q52_n_queens_ii; mod q53_maximum_subarray; mod q543_diameter_of_binary_tree; mod q54_spiral_matrix; mod q55_jump_game; mod q56_merge_intervals; mod q57_insert_interval; mod q59_spiral_matrix_ii; mod q5_longest_palindrome; mod q60_permutation_sequence; mod q61_rotate_list; mod q62_unique_paths; mod q63_unique_paths_ii; mod q64_minimum_path_sum; mod q65_valid_number; mod q68_text_justification; mod q6_zipzag_conversion; mod q71_simplify_path; mod q72_edit_distance; mod q73_set_matrix_zeroes; mod q74_search_a_2d_matrix; mod q75_sort_colors; mod q76_minimum_window_substring; mod q77_combinations; mod q78_subsets; mod q79_word_search; mod q7_reverse_integer; mod q80_remove_duplicates_from_sorted_array_ii; mod q81_search_in_rotated_sorted_array_ii; mod q82_remove_duplicates_from_sorted_list_ii; mod q844_backspace_string_compare;
mod q87_scramble_string; mod q89_gray_code; mod q8_my_atoi; mod q90_subsets_ii; mod q91_decode_ways; mod q92_reverse_linked_list_ii; mod q93_restore_ip_addresses; mod q94_binary_tree_inorder_traversal; mod q95_unique_binary_search_trees_ii; mod q96_unique_binary_search_trees; mod q97_interleaving_string; mod q98_validate_binary_search_tree; mod q99_recover_binary_search_tree; // some extra problems can only be found in "30-Day LeetCoding Challenge" mod day_30_leetcoding_challenge; // mod q834_sum_of_distances_in_tree; // DNF // mod q105_construct_binary_tree_from_preorder_and_inorder_traversal; // DNF // mod q814_binary_tree_pruning; // mod q173_binary_search_tree_iterator; // mod q958_check_completeness_of_a_binary_tree; // mod q639_decode_ways_ii; // need explain // mod q124_binary_tree_maximum_path_sum; // mod q221_maximal_square; // mod q1143_longest_common_subsequence; // mod q146_lru_cache; // mod q201_bitwise_and_of_numbers_range; // mod q560_subarray_sum_equals_k; // mod q678_valid_parenthesis_string; // mod q238_product_of_array_except_self; // mod q1046_last_stone_weight; // mod q155_min_stack; // mod q876_middle_of_the_linked_list; // mod q122_best_time_to_buy_and_sell_stock_ii; // mod q283_move_zeroes; // mod q136_single_number; // mod q202_happy_number; // mod q328_odd_even_linked_list; // mod q725_split_linked_list; // mod q885_spiral_matrix_iii; // mod q143_recoder_list; // mod q216_combination_sum_iii; // mod q377_combination_sum_iv;
mod q84_largest_rectangle_in_histogram; mod q85_maximal_rectangle; mod q86_partition_list;
random_line_split
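Aside: the `TreeNode::from` builder in the row above queues raw `*mut` pointers to child slots to sidestep the borrow checker. Below is a minimal safe sketch of the same level-order convention, for contrast; the `from_level_order` name and the `main` driver are mine, not part of this repository. It queues parent nodes instead of child-slot pointers, so no `unsafe` is required.

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

#[derive(Debug)]
struct TreeNode {
    val: i32,
    left: Option<Rc<RefCell<TreeNode>>>,
    right: Option<Rc<RefCell<TreeNode>>>,
}

/// Safe level-order construction: keep the parents whose children are still
/// missing in a queue and fill their left/right slots in input order.
fn from_level_order(values: Vec<Option<i32>>) -> Option<Rc<RefCell<TreeNode>>> {
    let new = |v| Rc::new(RefCell::new(TreeNode { val: v, left: None, right: None }));
    let mut iter = values.into_iter();
    let root = new(iter.next().flatten()?);
    let mut parents = VecDeque::from([Rc::clone(&root)]);
    while let Some(parent) = parents.pop_front() {
        // Take the next two entries as this node's left and right children.
        for side in 0..2 {
            match iter.next() {
                Some(Some(v)) => {
                    let child = new(v);
                    parents.push_back(Rc::clone(&child));
                    if side == 0 {
                        parent.borrow_mut().left = Some(child);
                    } else {
                        parent.borrow_mut().right = Some(child);
                    }
                }
                Some(None) => {}           // explicit empty slot: child stays None
                None => return Some(root), // input exhausted
            }
        }
    }
    Some(root)
}

fn main() {
    // Same convention as the doc comment above: [1, 2, 3, null, 5]
    let tree = from_level_order(vec![Some(1), Some(2), Some(3), None, Some(5)]);
    println!("{tree:#?}");
}
```

As in the original, a `None` entry consumes a slot but is never queued, so an empty node is not expected to have children of its own.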
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to /// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool, { self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". 
/// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn
<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. #[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> { let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. 
struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. None } else { // We may be able to construct a word! 
self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
iter
identifier_name
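A quick sketch of driving the API shown in this row (assuming `petname` and `rand` as dependencies; the word lists here are made up for illustration). With three words, `Lists` yields one adverb, one adjective and one name, so `cardinality(3)` is the product of the three list lengths:

```rust
fn main() {
    // Argument order in init() is: adjectives, adverbs, names.
    let petnames = petname::Petnames::init(
        "red green blue", // 3 adjectives
        "boldly calmly",  // 2 adverbs
        "fox owl",        // 2 names
    );
    // One adverb, one adjective, one name: 2 * 3 * 2 = 12 combinations.
    assert_eq!(petnames.cardinality(3), 12);
    let mut rng = rand::thread_rng();
    println!("{}", petnames.generate(&mut rng, 3, "-")); // e.g. "calmly-red-owl"
}
```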
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to
{ self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". /// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. 
/// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. #[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> { let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. 
struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. None } else { // We may be able to construct a word! 
self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
/// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool,
random_line_split
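The `NamesProduct` iterator in this row walks the full product of the word lists: each shuffled list ends in a `None` cycle marker, and hitting a marker both refreshes that list and advances the next one, with the leftmost list cycling most rapidly. That is an odometer. Here is a standalone sketch of the same idea over plain index counters; the `product` helper is mine, not the crate's implementation, and it omits the shuffling and the capacity tuning:

```rust
/// Yield every combination from `lists`, leftmost wheel spinning fastest.
fn product<'a>(lists: &'a [Vec<&'a str>]) -> impl Iterator<Item = String> + 'a {
    let mut idx = vec![0usize; lists.len()];
    let mut done = lists.is_empty() || lists.iter().any(|l| l.is_empty());
    std::iter::from_fn(move || {
        if done {
            return None;
        }
        let name = idx.iter().zip(lists).map(|(&i, l)| l[i]).collect::<Vec<_>>().join("-");
        // Advance the leftmost wheel; on wrap-around, carry into the next one.
        for (i, l) in idx.iter_mut().zip(lists) {
            *i += 1;
            if *i < l.len() {
                return Some(name);
            }
            *i = 0; // wrapped: carry
        }
        done = true; // every wheel wrapped: the product is exhausted
        Some(name)
    })
}

fn main() {
    let lists = vec![vec!["a", "b"], vec!["x", "y"]];
    let names: Vec<String> = product(&lists).collect();
    assert_eq!(names, ["a-x", "b-x", "a-y", "b-y"]);
}
```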
lib.rs
#![no_std] //! //! You can populate [`Petnames`] with your own word lists, but the word lists //! from upstream [petname](https://github.com/dustinkirkland/petname) are //! included with the `default_dictionary` feature (enabled by default). See //! [`Petnames::small`], [`Petnames::medium`], and [`Petnames::large`] to select //! a particular built-in word list, or use the [`Default`] implementation. //! //! The other thing you need is a random number generator from [rand][]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate(&mut rng, 7, ":"); //! ``` //! //! It may be more convenient to use the default random number generator: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::Petnames::default().generate_one(7, ":"); //! ``` //! //! There's a [convenience function][petname] that'll do all of this: //! //! ```rust //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let pname = petname::petname(7, ":"); //! ``` //! //! But the most flexible approach is to create an [`Iterator`] with //! [`iter`][`Petnames::iter`]: //! //! ```rust //! # #[cfg(feature = "std_rng")] //! let mut rng = rand::thread_rng(); //! # #[cfg(feature = "default_dictionary")] //! let petnames = petname::Petnames::default(); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! let ten_thousand_names: Vec<String> = //! petnames.iter(&mut rng, 3, "_").take(10000).collect(); //! ``` //! //! You can modify the word lists to, for example, only use words beginning with //! the letter "b": //! //! ```rust //! # #[cfg(feature = "default_dictionary")] //! let mut petnames = petname::Petnames::default(); //! # #[cfg(feature = "default_dictionary")] //! petnames.retain(|s| s.starts_with("b")); //! # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] //! petnames.generate_one(3, "."); //! ``` //! extern crate alloc; use alloc::{ borrow::Cow, string::{String, ToString}, vec::Vec, }; use itertools::Itertools; use rand::seq::SliceRandom; /// Convenience function to generate a new petname from default word lists. #[allow(dead_code)] #[cfg(feature = "std_rng")] #[cfg(feature = "default_dictionary")] pub fn petname(words: u8, separator: &str) -> String { Petnames::new().generate_one(words, separator) } /// A word list. pub type Words<'a> = Cow<'a, [&'a str]>; /// Word lists and the logic to combine them into _petnames_. /// /// A _petname_ with `n` words will contain, in order: /// /// * `n - 2` adverbs when `n >= 2`, otherwise 0 adverbs. /// * 1 adjective when `n >= 2`, otherwise 0 adjectives. /// * 1 name / noun when `n >= 1`, otherwise 0 names. /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct Petnames<'a> { pub adjectives: Words<'a>, pub adverbs: Words<'a>, pub names: Words<'a>, } #[cfg(feature = "default_dictionary")] mod words { include!(concat!(env!("OUT_DIR"), "/words.rs")); } impl<'a> Petnames<'a> { /// Constructs a new `Petnames` from the default (small) word lists. #[cfg(feature = "default_dictionary")] pub fn new() -> Self { Self::default() } /// Constructs a new `Petnames` from the small word lists. 
#[cfg(feature = "default_dictionary")] pub fn small() -> Self { Self { adjectives: Cow::from(&words::small::ADJECTIVES[..]), adverbs: Cow::from(&words::small::ADVERBS[..]), names: Cow::from(&words::small::NAMES[..]), } } /// Constructs a new `Petnames` from the medium word lists. #[cfg(feature = "default_dictionary")] pub fn medium() -> Self { Self { adjectives: Cow::from(&words::medium::ADJECTIVES[..]), adverbs: Cow::from(&words::medium::ADVERBS[..]), names: Cow::from(&words::medium::NAMES[..]), } } /// Constructs a new `Petnames` from the large word lists. #[cfg(feature = "default_dictionary")] pub fn large() -> Self { Self { adjectives: Cow::from(&words::large::ADJECTIVES[..]), adverbs: Cow::from(&words::large::ADVERBS[..]), names: Cow::from(&words::large::NAMES[..]), } } /// Constructs a new `Petnames` from the given word lists. /// /// The words are extracted from the given strings by splitting on whitespace. pub fn init(adjectives: &'a str, adverbs: &'a str, names: &'a str) -> Self { Self { adjectives: Cow::Owned(adjectives.split_whitespace().collect::<Vec<_>>()), adverbs: Cow::Owned(adverbs.split_whitespace().collect::<Vec<_>>()), names: Cow::Owned(names.split_whitespace().collect::<Vec<_>>()), } } /// Keep words matching a predicate. /// /// # Examples /// /// ```rust /// # #[cfg(feature = "default_dictionary")] /// let mut petnames = petname::Petnames::default(); /// # #[cfg(feature = "default_dictionary")] /// petnames.retain(|s| s.starts_with("b")); /// # #[cfg(feature = "default_dictionary")] /// # #[cfg(feature = "std_rng")] /// petnames.generate_one(2, "."); /// ``` /// /// This is merely a convenience wrapper that applies the same predicate to /// the adjectives, adverbs, and names lists. /// pub fn retain<F>(&mut self, mut predicate: F) where F: FnMut(&str) -> bool, { self.adjectives.to_mut().retain(|word| predicate(word)); self.adverbs.to_mut().retain(|word| predicate(word)); self.names.to_mut().retain(|word| predicate(word)); } /// Calculate the cardinality of this `Petnames`. /// /// If this is low, names may be repeated by the generator with a higher /// frequency than your use-case may allow. If it is 0 (zero) the generator /// will panic (unless `words` is also zero). /// /// This can saturate. If the total possible combinations of words exceeds /// `u128::MAX` then this will return `u128::MAX`. pub fn cardinality(&self, words: u8) -> u128 { Lists::new(words) .map(|list| match list { List::Adverb => self.adverbs.len() as u128, List::Adjective => self.adjectives.len() as u128, List::Name => self.names.len() as u128, }) .reduce(u128::saturating_mul) .unwrap_or(0u128) } /// Generate a new petname. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// petname::Petnames::default().generate(&mut rng, 7, ":"); /// ``` /// /// # Notes /// /// This may return fewer words than you request if one or more of the word /// lists are empty. For example, if there are no adverbs, requesting 3 or /// more words may still yield only "doubtful-salmon". 
/// pub fn generate<RNG>(&self, rng: &mut RNG, words: u8, separator: &str) -> String where RNG: rand::Rng, { Itertools::intersperse( Lists::new(words).filter_map(|list| match list { List::Adverb => self.adverbs.choose(rng).copied(), List::Adjective => self.adjectives.choose(rng).copied(), List::Name => self.names.choose(rng).copied(), }), separator, ) .collect::<String>() } /// Generate a single new petname. /// /// This is like `generate` but uses `rand::thread_rng` as the random /// source. For efficiency use `generate` when creating multiple names, or /// when you want to use a custom source of randomness. #[cfg(feature = "std_rng")] pub fn generate_one(&self, words: u8, separator: &str) -> String { self.generate(&mut rand::thread_rng(), words, separator) } /// Iterator yielding petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter<RNG>(&'a self, rng: &'a mut RNG, words: u8, separator: &str) -> Names<'a, RNG> where RNG: rand::Rng, { Names { petnames: self, rng, words, separator: separator.to_string() } } /// Iterator yielding unique – i.e. non-repeating – petnames. /// /// # Examples /// /// ```rust /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut rng = rand::thread_rng(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let petnames = petname::Petnames::default(); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// let mut iter = petnames.iter_non_repeating(&mut rng, 4, "_"); /// # #[cfg(all(feature = "std_rng", feature = "default_dictionary"))] /// println!("name: {}", iter.next().unwrap()); /// ``` /// pub fn iter_non_repeating<RNG>( &'a self, rng: &'a mut RNG, words: u8, separator: &str, ) -> impl Iterator<Item = String> + 'a where RNG: rand::Rng, { let lists: Vec<&'a Words<'a>> = Lists::new(words) .map(|list| match list { List::Adverb => &self.adverbs, List::Adjective => &self.adjectives, List::Name => &self.names, }) .collect(); NamesProduct::shuffled(&lists, rng, separator) } } #[cfg(feature = "default_dictionary")] impl<'a> Default for Petnames<'a> { fn default() -> Self { Self::small() } } /// Enum representing which word list to use. #[derive(Debug, PartialEq)] enum List { Adverb, Adjective, Name, } /// Iterator, yielding which word list to use next. /// /// This yields the appropriate list – [adverbs][List::Adverb], /// [adjectives][List::Adjective]s, [names][List::Name] – from which to select /// a word when constructing a petname of `n` words. For example, if you want 4 /// words in your petname, this will first yield [List::Adverb], then /// [List::Adverb] again, then [List::Adjective], and lastly [List::Name]. 
#[derive(Debug, PartialEq)] enum Lists { Adverb(u8), Adjective, Name, Done, } impl Lists { fn new(words: u8) -> Self { match words { 0 => Self::Done, 1 => Self::Name, 2 => Self::Adjective, n => Self::Adverb(n - 3), } } fn advance(&mut self) { *self = match self { Self::Adverb(0) => Self::Adjective, Self::Adverb(remaining) => Self::Adverb(*remaining - 1), Self::Adjective => Self::Name, Self::Name | Self::Done => Self::Done, } } } impl Iterator for Lists { type Item = List; fn next(&mut self) -> Option<Self::Item> {
size_hint(&self) -> (usize, Option<usize>) { let remains = match self { Self::Adverb(n) => (n + 3) as usize, Self::Adjective => 2, Self::Name => 1, Self::Done => 0, }; (remains, Some(remains)) } } /// Iterator yielding petnames. pub struct Names<'a, RNG> where RNG: rand::Rng, { petnames: &'a Petnames<'a>, rng: &'a mut RNG, words: u8, separator: String, } impl<'a, RNG> Names<'a, RNG> where RNG: rand::Rng, { /// Calculate the cardinality of this iterator; see `Petnames::cardinality`. #[allow(dead_code)] pub fn cardinality(&self) -> u128 { self.petnames.cardinality(self.words) } } impl<'a, RNG> Iterator for Names<'a, RNG> where RNG: rand::Rng, { type Item = String; fn next(&mut self) -> Option<Self::Item> { Some(self.petnames.generate(self.rng, self.words, &self.separator)) } } /// Iterator yielding petnames from the product of given word lists. /// /// This can be used to ensure that only unique names are produced. struct NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { iters: Vec<(ITERATOR, Option<&'a str>)>, separator: String, capacity: usize, } impl<'a> NamesProduct<'a, core::iter::Cycle<alloc::vec::IntoIter<Option<&'a str>>>> { /// Shuffles each of the given `lists` with `rng`, then cycles through the /// product of the lists, joining with `separator`. The leftmost list will /// cycle most rapidly. fn shuffled<RNG>(lists: &[&'a Words<'a>], rng: &'a mut RNG, separator: &str) -> Self where RNG: rand::Rng, { NamesProduct { iters: lists .iter() .map(|words| { let mut list: Vec<Option<&'a str>> = Vec::with_capacity(words.len().saturating_add(1)); list.extend(words.iter().map(|word| Some(*word))); list.shuffle(rng); // Could be expensive. list.push(None); // Cycle marker. (list.into_iter().cycle(), None) }) .collect(), separator: separator.to_string(), capacity: Self::capacity(lists, separator), } } fn capacity(lists: &[&'a Words<'a>], separator: &str) -> usize { ( // Sum of the length of the longest possible word in each word list. lists .iter() .filter_map(|words| words.iter().map(|word| word.len()).max()) .fold(0usize, |sum, len| sum.saturating_add(len)) // The total length of all separators. Careful not to wrap usize. + (separator.len().saturating_mul(lists.len().saturating_sub(1))) ) // Things run _much_ quicker when the capacity is a power of 2. Memory // alignment? If so it may be enough to align at, say, 8 bytes, but this // works for now. .checked_next_power_of_two() // In case there are no lists, or they're all empty... or we have // calculated that we need more than usize::MAX capacity. .unwrap_or(0) } } impl<'a, ITERATOR> Iterator for NamesProduct<'a, ITERATOR> where ITERATOR: Iterator<Item = Option<&'a str>>, { type Item = String; fn next(&mut self) -> Option<Self::Item> { let mut bump = true; // Request advance of next iterator. for (iter, word) in self.iters.iter_mut() { if bump || word.is_none() { match iter.next() { None => { // This shouldn't happen because we expect the iterators // to cycle. However, if it does, we're definitely done. return None; } Some(None) => { // This is the cycle end marker. We want to get another // new word from this iterator, and advance the *next* // iterator too. match iter.next() { None => return None, Some(None) => return None, Some(s) => *word = s, } bump = true } Some(s) => { // We have a new word from this iterator, so we do not // yet need to advance the next iterator. *word = s; bump = false } } } } if bump { // We reached the end of the last iterator, hence we're done. 
None } else { // We may be able to construct a word! self.iters.iter().try_fold(String::with_capacity(self.capacity), |acc, (_, w)| match (acc, *w) { (s, Some(w)) if s.is_empty() => Some(s + w), (s, Some(w)) => Some(s + &self.separator + w), _ => None, }) } } } #[cfg(test)] mod tests { #[test] fn lists_sequences_adverbs_adjectives_then_names() { let mut lists = super::Lists::new(4); assert_eq!(super::Lists::Adverb(1), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adverb(0), lists); assert_eq!(Some(super::List::Adverb), lists.next()); assert_eq!(super::Lists::Adjective, lists); assert_eq!(Some(super::List::Adjective), lists.next()); assert_eq!(super::Lists::Name, lists); assert_eq!(Some(super::List::Name), lists.next()); assert_eq!(super::Lists::Done, lists); assert_eq!(None, lists.next()); } #[test] fn lists_size_hint() { let mut lists = super::Lists::new(3); assert_eq!((3, Some(3)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((2, Some(2)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((1, Some(1)), lists.size_hint()); assert!(lists.next().is_some()); assert_eq!((0, Some(0)), lists.size_hint()); assert_eq!(None, lists.next()); assert_eq!((0, Some(0)), lists.size_hint()); } }
let list = match self { Self::Adjective => Some(List::Adjective), Self::Adverb(_) => Some(List::Adverb), Self::Name => Some(List::Name), Self::Done => None, }; self.advance(); list } fn
identifier_body
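One detail worth noting in this row: `Lists::size_hint` returns `(n, Some(n))`, an exact hint, which lets consumers such as `collect` preallocate. A minimal standalone sketch of the same pattern, together with the `ExactSizeIterator` marker that an exact hint permits (the `Countdown` type is hypothetical, not part of the crate):

```rust
struct Countdown(u8);

impl Iterator for Countdown {
    type Item = u8;
    fn next(&mut self) -> Option<u8> {
        if self.0 == 0 {
            None
        } else {
            self.0 -= 1;
            Some(self.0)
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.0 as usize;
        (n, Some(n)) // exact: lower bound equals upper bound
    }
}

// Sound only because the hint above is exact; len() then comes for free.
impl ExactSizeIterator for Countdown {}

fn main() {
    let countdown = Countdown(3);
    assert_eq!(countdown.len(), 3);
    assert_eq!(countdown.collect::<Vec<_>>(), [2, 1, 0]);
}
```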
califa2_2.py
# -*- coding: utf-8 -*- ''' Program to work with the CALIFA data. A new approach, without separating stellar populations. Resuming my research... May the Universe help me! Version 2.0, 07 December 2016 ------------------ Version 2.1, 22 February 2017 Added the circular radial profiles ------------------ Version 2.2, 24 February 2017 -Added a function to compute the eccentricity and inclination angle of the ellipse, used to compute the elliptical radial profiles -Normalized the mean radii and the semi-major axis by the equivalent_radius -Added the std of the measurements ------------------- Version 2.2.1, 06 March 2017 -Moments, ellipse parameters and centroids computed in separate modules -Computation of the elliptical radial profiles ------------------- Version 2.2.2 -Focus on the Halpha analyses ------------------- Version 2.2.3 Improved the sizes of the x and y axis labels and of the title ''' #!/usr/bin/python # -*- coding: utf-8 -*- import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import cubehelix import matplotlib.mlab as mlab import scipy, pylab import math import momentos as mom from matplotlib.patches import Ellipse import matplotlib as mpl __author__ = 'pnovais' ini=time.time() class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' PATY = '\033[32m' PINK = '\033[35m' YELLOWs = '\033[33m' def get_image(f_sdss): '''defining the function that will read
lculates the concentration of a population, using the definition of Conselice (2003) http://iopscience.iop.org/article/10.1086/375001/pdf ''' radius=df.sort_values('raio') r20=radius.iat[int(0.2*len(df)),-1] r80=radius.iat[int(0.8*len(df)),-1] Conc = 5*np.log10(r80/r20) # base-10 logarithm, as in the cited definition return Conc def Z(df0,gal,Conc,ordem): '''function that sorts by the property of interest, splits it into equal-size bins and computes some statistics per bin''' df_Z = pd.DataFrame() propr = [] err_prop = [] raio = [] err_raio = [] halpha = [] err_halpha = [] dens = [] err_dens = [] idade = [] err_age = [] semia = [] err_semia = [] conc = [] df = df0.sort_values(by=ordem) df = df.reset_index() del df['index'] cx, cy = mom.centro_mass(df) delta = len(df)//50 # bin size (integer division; ~50 bins) for i in range(0,(len(df)), delta): df1 = df.iloc[i:i+delta,:] # .iloc replaces the deprecated .ix; the end index is exclusive propr.append(df1[ordem].mean()) err_prop.append(df1[ordem].std()) raio.append(df1['raio'].mean()) err_raio.append(df1['raio'].std()) halpha.append(df1['halpha'].mean()) err_halpha.append(df1['halpha'].std()) dens.append(df1['mass'].mean()) err_dens.append(df1['mass'].std()) idade.append(df1['age'].mean()) err_age.append(df1['age'].std()) semia.append(df1['a'].mean()) err_semia.append(df1['a'].std()) conc.append(C(df1)) df_Z[ordem] = propr df_Z['erro'] = err_prop df_Z['raio_m'] = raio df_Z['err_raio'] = err_raio df_Z['age_m'] = idade df_Z['err_age'] = err_age df_Z['mass_m'] = dens df_Z['err_mass'] = err_dens df_Z['halpha_m'] = halpha df_Z['err_halpha'] = err_halpha df_Z['a_m'] = semia df_Z['err_a'] = err_semia df_Z[Conc] = conc return df_Z def obtendo_dados(img,param): '''builds a dataframe with columns x, y and the given parameter from the FITS image data''' df = pd.DataFrame() nrows, ncols = img.shape xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] ) table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() )) temp = pd.DataFrame(table, columns=['x','y',param]) df = pd.concat([df,temp], axis=1) return(df) def plots(df,param1,param2,param3,diretorio): '''function to plot the graphs''' plt.figure() incr = param3*(df.iloc[:,0].mean()) plt.xlim([(df.iloc[:,0].min()-(incr)),(df.iloc[:,0].max()+(incr))]) plt.scatter(df.iloc[:,0], df.iloc[:,12]) plt.title(gal+' '+tipo, fontsize=30) plt.ylabel('Concentration', fontsize=30) plt.xlabel(param2, fontsize=30) plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2)) plt.close() # plt.figure() # plt.title('C(%s) distribution - %s' %(param2,param1)) # df.iloc[:,0].hist(bins=100) # plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2)) # plt.close() data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew' age = pd.read_csv('Paty_at_flux__yx/age.csv') mass = pd.read_csv('PatImages/mass.csv') halpha = pd.read_csv('Hamaps/halpha.csv') #halpha = pd.read_csv('Hamaps/teste.csv') hu1 = [] hu2 = [] hu3 = [] hu4 = [] hu5 = [] hu6 = [] hu7 = [] hugal = [] hutype = [] df_hu = pd.DataFrame() for i_gal in range(len(halpha)): #for i_gal in range(0,2): print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.FAIL + '-'*33 + 'OBJECT: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC) print(bcolors.FAIL +'-'*79+ bcolors.ENDC) plt.close() image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal])) img = get_image(image_ha) # plotting the FITS image plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold') plt.ylabel('Y',fontweight='bold') imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps - Galaxy %s '
%halpha['num_gal'][i_gal] plt.title(titulo) #plt.colorbar() figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) #obtendo os dados de Halpha da imagem fits df_ha = obtendo_dados(img,'halpha') #obtendo os dados de densidade de massa da imagem fits image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_mass) df_mass = obtendo_dados(img, 'mass') #obtendo os dados de idade da imagem fits image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_age) df_age = obtendo_dados(img, 'age') #selecionando apenas os dados de idade > 0 e mass > 0 df0 = pd.merge(df_age,df_mass) df1 = pd.merge(df0,df_ha, how='inner') df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)] Re = mom.equivalent_radius(df) cx, cy = mom.centro_mass(df) tetha, exc, a, b = mom.param_elipse(df) df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2) acres = math.radians(180) d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2 e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2 df['a'] = np.sqrt(d + e/((1-exc)**2)) gal = halpha['num_gal'][i_gal] tipo = halpha['type'][i_gal] age_test = Z(df,gal,'conc_age','age') mass_test = Z(df,gal,'conc_mass','mass') ha_test = Z(df,gal,'conc_ha','halpha') raio_test = Z(df,gal,'conc_raio', 'raio') a_test = Z(df,gal, 'conc_a', 'a') plots(age_test,gal,'Age',0,'concentracao') plots(mass_test,gal,'Mass_density',1,'concentracao') plots(ha_test,gal,'Halpha',1,'concentracao') #perfis circulares plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)), (raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))]) ax2.errorbar(raio_test.raio_m/Re, raio_test.halpha_m, yerr=raio_test.err_halpha, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel(r'Mean $H\alpha$') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(raio_test.raio_m/Re, raio_test.mass_m, yerr=raio_test.err_mass, fmt='.') plt.plot(raio_test.raio_m/Re, raio_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Radius/Re') plt.savefig('figures/perfis_circular/gal%s_perfis_circ' %(gal)) plt.close(1) plt.figure() plt.scatter(raio_test.raio_m/Re, raio_test.conc_raio) plt.plot(raio_test.raio_m/Re, raio_test.conc_raio, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Raio/Re') plt.savefig('figures/perfis_circular/gal%s_perfil_concentracao_circ' %(gal)) plt.close() #perfis elipticos plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(a_test.a_m/Re, a_test.age_m, yerr=a_test.err_age, fmt='o') plt.scatter(a_test.a_m/Re, a_test.age_m) plt.plot(a_test.a_m/Re, a_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) ax2.errorbar(a_test.a_m/Re, a_test.halpha_m, 
yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(a_test.a_m/Re, a_test.mass_m, yerr=a_test.err_mass, fmt='.') plt.plot(a_test.a_m/Re, a_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfis_elip' %(gal)) plt.close(1) plt.figure() plt.scatter(a_test.a_m/Re, a_test.conc_a) plt.plot(a_test.a_m/Re, a_test.conc_a, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfil_concentracao_elip' %(gal)) plt.close() mean = [cx,cy] width = 2*a height = 2*b angle = math.degrees(tetha) ell = mpl.patches.Ellipse(xy=mean, width=width, height=height, angle = 180+angle, alpha=0.2, color='black') fig, ax = plt.subplots() ax.add_patch(ell) ax.autoscale() df2 = df.ix[(df.a > a/3) & (df.a < (a/3 + 2))] df3 = df.ix[(df.a > a/2) & (df.a < (a/2 + 2))] df4 = df.ix[(df.a > a) & (df.a < (a + 2))] plt.scatter(df.x,df.y, c='red', s=10, alpha=0.7) plt.scatter(df2.x,df2.y, c='blue') plt.scatter(df3.x,df3.y, c='purple') plt.scatter(df4.x, df4.y, c='green') plt.savefig('figures/ajuste_elipse/gal%s_elipses' %(gal)) plt.close() print('excentricidade = %f' %exc) print('inclinacao = %f' %(math.degrees(tetha))) print('#%d' %i_gal) hu = mom.hu_moments(df) hu1.append(hu[0]) hu2.append(hu[1]) hu3.append(hu[2]) hu4.append(hu[3]) hu5.append(hu[4]) hu6.append(hu[5]) hu7.append(hu[6]) hugal.append(gal) hutype.append(tipo) #graficos apenas de Halpha plt.figure(1) #Imagem em Ha plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold', fontsize=30) plt.ylabel('Y',fontweight='bold', fontsize=30) imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps %s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo, fontsize=30) #plt.colorbar() figura = 'figures/Ha_analysis/gal_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) plots(ha_test,gal,'Halpha',1,'Ha_analysis') #C(Z) plt.figure() #Z(a) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) plt.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha', fontsize=30) plt.xlabel('Semi-eixo a/Re', fontsize=30) titulo2='%s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo2, fontsize=30) plt.savefig('figures/Ha_analysis/gal_%s_perfis_elip' %(gal)) df_hu['gal'] = hugal df_hu['tipo'] = hutype df_hu['hu1'] = hu1 df_hu['hu2'] = hu2 df_hu['hu3'] = hu3 df_hu['hu4'] = hu4 df_hu['hu5'] = hu5 df_hu['hu6'] = hu6 df_hu['hu7'] = hu7 #df_hu['hu1','hu2','hu3','hu4','hu5','hu6','hu7'] = hu1 df_hu.to_csv('hu_moments_gal.csv', index=False) fim = time.time() time_proc = fim - ini print('') #print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.OKBLUE + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
as imagens fits'''
    img = f_sdss[0].data
    return img

def C(df):
    '''funcao que ca
identifier_body
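The middle of the sample above is the body of C(df), the Conselice concentration index. Two caveats: the linked paper (DOI 10.1086/375001) is Conselice 2003, not 2014 as the docstring says, and the standard definition is C = 5 log10(r80/r20), whereas the code calls np.log (natural log). A minimal sketch under that reading, with quantile-based r20/r80 standing in for the original sorted-iat lookup:

import numpy as np
import pandas as pd

def concentration(df):
    """Conselice (2003) concentration index: C = 5 * log10(r80 / r20).

    Assumes df has a 'raio' column of per-pixel radii; the 20th and 80th
    percentiles replace the sorted positional lookup of the original.
    """
    r20 = df['raio'].quantile(0.20)
    r80 = df['raio'].quantile(0.80)
    return 5.0 * np.log10(r80 / r20)

# usage with toy data
pixels = pd.DataFrame({'raio': np.random.uniform(0.1, 30.0, size=5000)})
print(concentration(pixels))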
califa2_2.py
# -*- coding: utf-8 -*- ''' Programa para trabalhar os dados do CALIFA Uma nova abordagem, sem separacao de populacoes estelares Retomando minha pesquisa... Que o Universo me ajude! Versão 2.0 07-dezembro-2016 ------------------ Versão 2.1 22-fevereiro-2017 Adição dos perfis radiais circulares ------------------ Versão 2.2 24-fevereiro-2017 -Adição de uma função para o cálculo da excentricidade da elipse e ângulo de inclinação, utilizadas pra cálculo dos perfis radiais elípticos -Normalização dos raios médios e Semi-eixo maior pelo equivalent_radius -Adição dos std nas medidas ------------------- Versão 2.2.1 06-março-2017 -Cálculo de momentos, parâmetros da elipse e centróides através de módulos a parte -Cálculo dos perfis radiais elipticos ------------------- Versão 2.2.2 -Foco nas análises de Halpha ------------------- Versão 2.2.3 Melhorando os tamanhos dos labels dos eixos x e y, além do título ''' #!/usr/bin/python # -*- coding: utf-8 -*- import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import cubehelix import matplotlib.mlab as mlab import scipy, pylab import math import momentos as mom from matplotlib.patches import Ellipse import matplotlib as mpl __author__ = 'pnovais' ini=time.time() class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' PATY = '\033[32m' PINK = '\033[35m' YELLOWs = '\033[33m' def get_image(f_sdss): '''definindo a funcao que ira ler as imagens fits''' img = f_sdss[0].data return img def C(df): '''funcao que calcula a Concentracao de uma populacao, usando a definicao de Conselice(2014) http://iopscience.iop.org/article/10.1086/375001/pdf ''' a=1 radius=df.sort_values('raio') r20=radius.iat[int(0.2*len(df)),-1] r80=radius.iat[int(0.8*len(df)),-1] Conc = 5*np.log((r80/r20)) return Conc def Z(df0,gal,Conc,ordem): '''definindo uma funcao para ordenar a propridade de interesse dividindo-o em bins de igual tamanho e calculando alguns parametros''' df_Z = pd.DataFrame() propr = [] err_prop = [] raio = [] err_raio = [] halpha = [] err_halpha = [] dens = [] err_dens = [] idade = [] err_age = [] semia = [] err_semia = [] conc = [] df = df0.sort_values(by=ordem) df = df.reset_index() del df['index'] cx, cy = mom.centro_mass(df) delta = len(df)/50 #Quantidade de bins j=0 for i in range(0,(len(df)), delta): df1 = df.ix[i:i+delta,:] propr.append(df1[ordem].mean()) err_prop.append(df1[ordem].std()) raio.append(df1['raio'].mean()) err_raio.append(df1['raio'].std()) halpha.append(df1['halpha'].mean()) err_halpha.append(df1['halpha'].std()) dens.append(df1['mass'].mean()) err_dens.append(df1['mass'].std()) idade.append(df1['age'].mean()) err_age.append(df1['age'].std()) semia.append(df1['a'].mean()) err_semia.append(df1['a'].std()) conc.append(C(df1)) j=j+1 df_Z[ordem] = propr df_Z['erro'] = err_prop df_Z['raio_m'] = raio df_Z['err_raio'] = err_raio df_Z['age_m'] = idade df_Z['err_age'] = err_age df_Z['mass_m'] = dens df_Z['err_mass'] = err_dens df_Z['halpha_m'] = halpha df_Z['err_halpha'] = err_halpha df_Z['a_m'] = semia df_Z['err_a'] = err_semia df_Z[Conc] = conc return df_Z def 
obtendo_dados(img,param): '''função para leitura do arquivo fits, criando um dataframe com os dados''' df = pd.DataFrame() nrows, ncols = img.shape xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] ) table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() )) temp = pd.DataFrame(table, columns=['x','y',param]) df = pd.concat([df,temp], axis=1) return(df) def plots(df,param1,param2,param3,diretorio): '''Função para plotar os gráficos''' plt.figure() incr = param3*(df.ix[:,0].mean()) plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))]) plt.scatter(df.ix[:,0], df.ix[:,12]) plt.title(gal+' '+tipo, fontsize=30) plt.ylabel('Concentraction', fontsize=30) plt.xlabel(param2, fontsize=30) plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2)) plt.close() # plt.figure() # plt.title('Distribuicao C(%s)- %s' %(param2,param1)) # df.ix[:,0].hist(bins=100) # plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2)) # plt.close() data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew' age = pd.read_csv('Paty_at_flux__yx/age.csv') mass = pd.read_csv('PatImages/mass.csv') halpha = pd.read_csv('Hamaps/halpha.csv') #halpha = pd.read_csv('Hamaps/teste.csv') hu1 = [] hu2 = [] hu3 = [] hu4 = [] hu5 = [] hu6 = [] hu7 = [] hugal = [] hutype = [] df_hu = pd.DataFrame() for i_gal in range(len(halpha)): #for i_gal in range(0,2): print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC) print(bcolors.FAIL +'-'*79+ bcolors.ENDC) plt.close() image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal])) img = get_image(image_ha) #plotando a imagem fits plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold') plt.ylabel('Y',fontweight='bold') imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal] plt.title(titulo) #plt.colorbar() figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) #obtendo os dados de Halpha da imagem fits df_ha = obtendo_dados(img,'halpha') #obtendo os dados de densidade de massa da imagem fits image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_mass) df_mass = obtendo_dados(img, 'mass') #obtendo os dados de idade da imagem fits image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_age) df_age = obtendo_dados(img, 'age') #selecionando apenas os dados de idade > 0 e mass > 0 df0 = pd.merge(df_age,df_mass) df1 = pd.merge(df0,df_ha, how='inner') df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)] Re = mom.equivalent_radius(df) cx, cy = mom.centro_mass(df) tetha, exc, a, b = mom.param_elipse(df) df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2) acres = math.radians(180) d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2 e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2 df['a'] = np.sqrt(d + e/((1-exc)**2)) gal = halpha['num_gal'][i_gal] tipo = halpha['type'][i_gal] age_test = Z(df,gal,'conc_age','age') mass_test = Z(df,gal,'conc_mass','mass') ha_test = Z(df,gal,'conc_ha','halpha') raio_test = Z(df,gal,'conc_raio', 'raio') a_test = Z(df,gal, 'conc_a', 'a') plots(age_test,gal,'Age',0,'concentracao') plots(mass_test,gal,'Mass_density',1,'concentracao') plots(ha_test,gal,'Halpha',1,'concentracao') #perfis 
circulares plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)), (raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))]) ax2.errorbar(raio_test.raio_m/Re, raio_test.halpha_m, yerr=raio_test.err_halpha, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel(r'Mean $H\alpha$') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(raio_test.raio_m/Re, raio_test.mass_m, yerr=raio_test.err_mass, fmt='.') plt.plot(raio_test.raio_m/Re, raio_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Radius/Re') plt.savefig('figures/perfis_circular/gal%s_perfis_circ' %(gal)) plt.close(1) plt.figure() plt.scatter(raio_test.raio_m/Re, raio_test.conc_raio) plt.plot(raio_test.raio_m/Re, raio_test.conc_raio, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Raio/Re') plt.savefig('figures/perfis_circular/gal%s_perfil_concentracao_circ' %(gal)) plt.close() #perfis elipticos plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(a_test.a_m/Re, a_test.age_m, yerr=a_test.err_age, fmt='o') plt.scatter(a_test.a_m/Re, a_test.age_m) plt.plot(a_test.a_m/Re, a_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) ax2.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(a_test.a_m/Re, a_test.mass_m, yerr=a_test.err_mass, fmt='.') plt.plot(a_test.a_m/Re, a_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfis_elip' %(gal)) plt.close(1) plt.figure() plt.scatter(a_test.a_m/Re, a_test.conc_a) plt.plot(a_test.a_m/Re, a_test.conc_a, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfil_concentracao_elip' %(gal)) plt.close() mean = [cx,cy] width = 2*a height = 2*b angle = math.degrees(tetha) ell = mpl.patches.Ellipse(xy=mean, width=width, height=height, angle = 180+angle, alpha=0.2, color='black') fig, ax = plt.subplots() ax.add_patch(ell) ax.autoscale() df2 = df.ix[(df.a > a/3) & (df.a < (a/3 + 2))] df3 = df.ix[(df.a > a/2) & (df.a < (a/2 + 2))] df4 = df.ix[(df.a > a) & (df.a < (a + 2))] plt.scatter(df.x,df.y, c='red', s=10, alpha=0.7) plt.scatter(df2.x,df2.y, c='blue') plt.scatter(df3.x,df3.y, c='purple') plt.scatter(df4.x, df4.y, c='green') plt.savefig('figures/ajuste_elipse/gal%s_elipses' %(gal)) plt.close()
hu = mom.hu_moments(df) hu1.append(hu[0]) hu2.append(hu[1]) hu3.append(hu[2]) hu4.append(hu[3]) hu5.append(hu[4]) hu6.append(hu[5]) hu7.append(hu[6]) hugal.append(gal) hutype.append(tipo) #graficos apenas de Halpha plt.figure(1) #Imagem em Ha plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold', fontsize=30) plt.ylabel('Y',fontweight='bold', fontsize=30) imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps %s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo, fontsize=30) #plt.colorbar() figura = 'figures/Ha_analysis/gal_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) plots(ha_test,gal,'Halpha',1,'Ha_analysis') #C(Z) plt.figure() #Z(a) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) plt.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha', fontsize=30) plt.xlabel('Semi-eixo a/Re', fontsize=30) titulo2='%s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo2, fontsize=30) plt.savefig('figures/Ha_analysis/gal_%s_perfis_elip' %(gal)) df_hu['gal'] = hugal df_hu['tipo'] = hutype df_hu['hu1'] = hu1 df_hu['hu2'] = hu2 df_hu['hu3'] = hu3 df_hu['hu4'] = hu4 df_hu['hu5'] = hu5 df_hu['hu6'] = hu6 df_hu['hu7'] = hu7 #df_hu['hu1','hu2','hu3','hu4','hu5','hu6','hu7'] = hu1 df_hu.to_csv('hu_moments_gal.csv', index=False) fim = time.time() time_proc = fim - ini print('') #print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.OKBLUE + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
    print('excentricidade = %f' %exc)
    print('inclinacao = %f' %(math.degrees(tetha)))
    print('#%d' %i_gal)
random_line_split
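The Z() function repeated in these samples builds radial profiles by sorting pixels on one property and averaging every column over roughly 50 equal-count bins. Note that delta = len(df)/50 is a float under Python 3, which breaks range() with a float step, and df.ix has since been removed from pandas. A sketch of the same binning with groupby; the integer-label trick is our substitution for the original slice loop and assumes only numeric columns:

import numpy as np
import pandas as pd

def binned_profile(df, order_by, n_bins=50):
    """Sort by `order_by`, then mean/std every column in equal-count bins.

    Floor division replaces the original len(df)/50, which is a float on
    Python 3 and cannot be used as a range() step.
    """
    df = df.sort_values(order_by).reset_index(drop=True)
    size = max(len(df) // n_bins, 1)
    labels = np.minimum(np.arange(len(df)) // size, n_bins - 1)
    grouped = df.groupby(labels)
    return grouped.mean().join(grouped.std(), rsuffix='_std')

# usage with toy data
df = pd.DataFrame({'raio': np.random.rand(1000), 'age': np.random.rand(1000)})
print(binned_profile(df, 'raio').head())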
califa2_2.py
# -*- coding: utf-8 -*- ''' Programa para trabalhar os dados do CALIFA Uma nova abordagem, sem separacao de populacoes estelares Retomando minha pesquisa... Que o Universo me ajude! Versão 2.0 07-dezembro-2016 ------------------ Versão 2.1 22-fevereiro-2017 Adição dos perfis radiais circulares ------------------ Versão 2.2 24-fevereiro-2017 -Adição de uma função para o cálculo da excentricidade da elipse e ângulo de inclinação, utilizadas pra cálculo dos perfis radiais elípticos -Normalização dos raios médios e Semi-eixo maior pelo equivalent_radius -Adição dos std nas medidas ------------------- Versão 2.2.1 06-março-2017 -Cálculo de momentos, parâmetros da elipse e centróides através de módulos a parte -Cálculo dos perfis radiais elipticos ------------------- Versão 2.2.2 -Foco nas análises de Halpha ------------------- Versão 2.2.3 Melhorando os tamanhos dos labels dos eixos x e y, além do título ''' #!/usr/bin/python # -*- coding: utf-8 -*- import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import cubehelix import matplotlib.mlab as mlab import scipy, pylab import math import momentos as mom from matplotlib.patches import Ellipse import matplotlib as mpl __author__ = 'pnovais' ini=time.time() class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' PATY = '\033[32m' PINK = '\033[35m' YELLOWs = '\033[33m' def get_image(f_sdss): '''definindo a funcao que ira ler as imagens fits''' img = f_sdss[0].data return img def C(df): '''funcao que calcula a Concentracao de uma populacao, usando a definicao de Conselice(2014) http://iopscience.iop.org/article/10.1086/375001/pdf ''' a=1 radius=df.sort_values('raio') r20=radius.iat[int(0.2*len(df)),-1] r80=radius.iat[int(0.8*len(df)),-1] Conc = 5*np.log((r80/r20)) return Conc def Z(df0,gal,Conc,ordem): '''definindo uma funcao para ordenar a propridade de interesse dividindo-o em bins de igual tamanho e calculando alguns parametros''' df_Z = pd.DataFrame() propr = [] err_prop = [] raio = [] err_raio = [] halpha = [] err_halpha = [] dens = [] err_dens = [] idade = [] err_age = [] semia = [] err_semia = [] conc = [] df = df0.sort_values(by=ordem) df = df.reset_index() del df['index'] cx, cy = mom.centro_mass(df) delta = len(df)/50 #Quantidade de bins j=0 for i in range(0,(len(df)), delta): df1 = df.ix[i:i+delta,:] propr.append(df1[ordem].mean()) err_prop.append(df1[ordem].std()) raio.append(df1['raio'].mean()) err_raio.append(df1['raio'].std()) halpha.append(df1['halpha'].mean()) err_halpha.append(df1['halpha'].std()) dens.append(df1['mass'].mean()) err_dens.append(df1['mass'].std()) idade.append(df1['age'].mean()) err_age.append(df1['age'].std()) semia.append(df1['a'].mean()) err_semia.append(df1['a'].std()) conc.append(C(df1)) j=j+1 df_Z[ordem] = propr df_Z['erro'] = err_prop df_Z['raio_m'] = raio df_Z['err_raio'] = err_raio df_Z['age_m'] = idade df_Z['err_age'] = err_age df_Z['mass_m'] = dens df_Z['err_mass'] = err_dens df_Z['halpha_m'] = halpha df_Z['err_halpha'] = err_halpha df_Z['a_m'] = semia df_Z['err_a'] = err_semia df_Z[Conc] = conc return df_Z def 
obtendo_dados(img,param): '''
eitura do arquivo fits, criando um dataframe com os dados''' df = pd.DataFrame() nrows, ncols = img.shape xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] ) table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() )) temp = pd.DataFrame(table, columns=['x','y',param]) df = pd.concat([df,temp], axis=1) return(df) def plots(df,param1,param2,param3,diretorio): '''Função para plotar os gráficos''' plt.figure() incr = param3*(df.ix[:,0].mean()) plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))]) plt.scatter(df.ix[:,0], df.ix[:,12]) plt.title(gal+' '+tipo, fontsize=30) plt.ylabel('Concentraction', fontsize=30) plt.xlabel(param2, fontsize=30) plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2)) plt.close() # plt.figure() # plt.title('Distribuicao C(%s)- %s' %(param2,param1)) # df.ix[:,0].hist(bins=100) # plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2)) # plt.close() data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew' age = pd.read_csv('Paty_at_flux__yx/age.csv') mass = pd.read_csv('PatImages/mass.csv') halpha = pd.read_csv('Hamaps/halpha.csv') #halpha = pd.read_csv('Hamaps/teste.csv') hu1 = [] hu2 = [] hu3 = [] hu4 = [] hu5 = [] hu6 = [] hu7 = [] hugal = [] hutype = [] df_hu = pd.DataFrame() for i_gal in range(len(halpha)): #for i_gal in range(0,2): print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC) print(bcolors.FAIL +'-'*79+ bcolors.ENDC) plt.close() image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal])) img = get_image(image_ha) #plotando a imagem fits plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold') plt.ylabel('Y',fontweight='bold') imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal] plt.title(titulo) #plt.colorbar() figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) #obtendo os dados de Halpha da imagem fits df_ha = obtendo_dados(img,'halpha') #obtendo os dados de densidade de massa da imagem fits image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_mass) df_mass = obtendo_dados(img, 'mass') #obtendo os dados de idade da imagem fits image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_age) df_age = obtendo_dados(img, 'age') #selecionando apenas os dados de idade > 0 e mass > 0 df0 = pd.merge(df_age,df_mass) df1 = pd.merge(df0,df_ha, how='inner') df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)] Re = mom.equivalent_radius(df) cx, cy = mom.centro_mass(df) tetha, exc, a, b = mom.param_elipse(df) df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2) acres = math.radians(180) d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2 e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2 df['a'] = np.sqrt(d + e/((1-exc)**2)) gal = halpha['num_gal'][i_gal] tipo = halpha['type'][i_gal] age_test = Z(df,gal,'conc_age','age') mass_test = Z(df,gal,'conc_mass','mass') ha_test = Z(df,gal,'conc_ha','halpha') raio_test = Z(df,gal,'conc_raio', 'raio') a_test = Z(df,gal, 'conc_a', 'a') plots(age_test,gal,'Age',0,'concentracao') plots(mass_test,gal,'Mass_density',1,'concentracao') plots(ha_test,gal,'Halpha',1,'concentracao') #perfis circulares plt.figure(1) plt.title(gal) 
ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)), (raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))]) ax2.errorbar(raio_test.raio_m/Re, raio_test.halpha_m, yerr=raio_test.err_halpha, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel(r'Mean $H\alpha$') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(raio_test.raio_m/Re, raio_test.mass_m, yerr=raio_test.err_mass, fmt='.') plt.plot(raio_test.raio_m/Re, raio_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Radius/Re') plt.savefig('figures/perfis_circular/gal%s_perfis_circ' %(gal)) plt.close(1) plt.figure() plt.scatter(raio_test.raio_m/Re, raio_test.conc_raio) plt.plot(raio_test.raio_m/Re, raio_test.conc_raio, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Raio/Re') plt.savefig('figures/perfis_circular/gal%s_perfil_concentracao_circ' %(gal)) plt.close() #perfis elipticos plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(a_test.a_m/Re, a_test.age_m, yerr=a_test.err_age, fmt='o') plt.scatter(a_test.a_m/Re, a_test.age_m) plt.plot(a_test.a_m/Re, a_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) ax2.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(a_test.a_m/Re, a_test.mass_m, yerr=a_test.err_mass, fmt='.') plt.plot(a_test.a_m/Re, a_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfis_elip' %(gal)) plt.close(1) plt.figure() plt.scatter(a_test.a_m/Re, a_test.conc_a) plt.plot(a_test.a_m/Re, a_test.conc_a, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfil_concentracao_elip' %(gal)) plt.close() mean = [cx,cy] width = 2*a height = 2*b angle = math.degrees(tetha) ell = mpl.patches.Ellipse(xy=mean, width=width, height=height, angle = 180+angle, alpha=0.2, color='black') fig, ax = plt.subplots() ax.add_patch(ell) ax.autoscale() df2 = df.ix[(df.a > a/3) & (df.a < (a/3 + 2))] df3 = df.ix[(df.a > a/2) & (df.a < (a/2 + 2))] df4 = df.ix[(df.a > a) & (df.a < (a + 2))] plt.scatter(df.x,df.y, c='red', s=10, alpha=0.7) plt.scatter(df2.x,df2.y, c='blue') plt.scatter(df3.x,df3.y, c='purple') plt.scatter(df4.x, df4.y, c='green') plt.savefig('figures/ajuste_elipse/gal%s_elipses' %(gal)) plt.close() print('excentricidade = %f' %exc) print('inclinacao = %f' %(math.degrees(tetha))) print('#%d' %i_gal) hu = mom.hu_moments(df) hu1.append(hu[0]) hu2.append(hu[1]) hu3.append(hu[2]) hu4.append(hu[3]) hu5.append(hu[4]) hu6.append(hu[5]) hu7.append(hu[6]) hugal.append(gal) 
hutype.append(tipo) #graficos apenas de Halpha plt.figure(1) #Imagem em Ha plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold', fontsize=30) plt.ylabel('Y',fontweight='bold', fontsize=30) imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps %s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo, fontsize=30) #plt.colorbar() figura = 'figures/Ha_analysis/gal_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) plots(ha_test,gal,'Halpha',1,'Ha_analysis') #C(Z) plt.figure() #Z(a) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) plt.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha', fontsize=30) plt.xlabel('Semi-eixo a/Re', fontsize=30) titulo2='%s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo2, fontsize=30) plt.savefig('figures/Ha_analysis/gal_%s_perfis_elip' %(gal)) df_hu['gal'] = hugal df_hu['tipo'] = hutype df_hu['hu1'] = hu1 df_hu['hu2'] = hu2 df_hu['hu3'] = hu3 df_hu['hu4'] = hu4 df_hu['hu5'] = hu5 df_hu['hu6'] = hu6 df_hu['hu7'] = hu7 #df_hu['hu1','hu2','hu3','hu4','hu5','hu6','hu7'] = hu1 df_hu.to_csv('hu_moments_gal.csv', index=False) fim = time.time() time_proc = fim - ini print('') #print(bcolors.FAIL +'-'*79+ bcolors.ENDC) print(bcolors.OKBLUE + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
função para l
identifier_name
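The split in this sample lands inside the docstring of obtendo_dados(), which flattens a 2-D FITS image into an (x, y, value) table via meshgrid over np.ogrid. np.indices expresses the same idea more directly; whether the x/y orientation below matches the original meshgrid call is an assumption worth checking:

import numpy as np
import pandas as pd

def image_to_table(img, value_name='halpha'):
    """Flatten a 2-D array into a tidy (x, y, value) DataFrame.

    Intended as an equivalent of obtendo_dados(); treating x as the
    column index and y as the row index is our assumption.
    """
    nrows, ncols = img.shape
    yy, xx = np.indices((nrows, ncols))
    return pd.DataFrame({'x': xx.ravel(), 'y': yy.ravel(),
                         value_name: img.ravel()})

# usage with a toy "image"
img = np.arange(12, dtype=float).reshape(3, 4)
print(image_to_table(img))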
califa2_2.py
# -*- coding: utf-8 -*- ''' Programa para trabalhar os dados do CALIFA Uma nova abordagem, sem separacao de populacoes estelares Retomando minha pesquisa... Que o Universo me ajude! Versão 2.0 07-dezembro-2016 ------------------ Versão 2.1 22-fevereiro-2017 Adição dos perfis radiais circulares ------------------ Versão 2.2 24-fevereiro-2017 -Adição de uma função para o cálculo da excentricidade da elipse e ângulo de inclinação, utilizadas pra cálculo dos perfis radiais elípticos -Normalização dos raios médios e Semi-eixo maior pelo equivalent_radius -Adição dos std nas medidas ------------------- Versão 2.2.1 06-março-2017 -Cálculo de momentos, parâmetros da elipse e centróides através de módulos a parte -Cálculo dos perfis radiais elipticos ------------------- Versão 2.2.2 -Foco nas análises de Halpha ------------------- Versão 2.2.3 Melhorando os tamanhos dos labels dos eixos x e y, além do título ''' #!/usr/bin/python # -*- coding: utf-8 -*- import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import cubehelix import matplotlib.mlab as mlab import scipy, pylab import math import momentos as mom from matplotlib.patches import Ellipse import matplotlib as mpl __author__ = 'pnovais' ini=time.time() class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' PATY = '\033[32m' PINK = '\033[35m' YELLOWs = '\033[33m' def get_image(f_sdss): '''definindo a funcao que ira ler as imagens fits''' img = f_sdss[0].data return img def C(df): '''funcao que calcula a Concentracao de uma populacao, usando a definicao de Conselice(2014) http://iopscience.iop.org/article/10.1086/375001/pdf ''' a=1 radius=df.sort_values('raio') r20=radius.iat[int(0.2*len(df)),-1] r80=radius.iat[int(0.8*len(df)),-1] Conc = 5*np.log((r80/r20)) return Conc def Z(df0,gal,Conc,ordem): '''definindo uma funcao para ordenar a propridade de interesse dividindo-o em bins de igual tamanho e calculando alguns parametros''' df_Z = pd.DataFrame() propr = [] err_prop = [] raio = [] err_raio = [] halpha = [] err_halpha = [] dens = [] err_dens = [] idade = [] err_age = [] semia = [] err_semia = [] conc = [] df = df0.sort_values(by=ordem) df = df.reset_index() del df['index'] cx, cy = mom.centro_mass(df) delta = len(df)/50 #Quantidade de bins j=0 for i in range(0,(len(df)), delta): df1 = df.ix[i:i+delta,:] propr.append(df1[ordem].mean()) err_prop.append(df1[ordem].std()) raio.append(df1['raio'].mean()) err_raio.append(df1['raio'].std()) halpha.append(df1['halpha'].mean()) err_halpha.append(df1['halpha'].std()) dens.append(df1['mass'].mean()) err_dens.append(df1['mass'].std()) idade.append(df1['age'].mean()) err_age.append(df1['age'].std()) semia.append(df1['a'].mean()) err_semia.append(df1['a'].std()) conc.append(C(df1)) j=j+1 df_Z[ordem] = propr df_Z['erro'] = err_prop df_Z['raio_m'] = raio df_Z['err_raio'] = err_raio df_Z['age_m'] = idade df_Z['err_age'] = err_age df_Z['mass_m'] = dens df_Z['err_mass'] = err_dens df_Z['halpha_m'] = halpha df_Z['err_halpha'] = err_halpha df_Z['a_m'] = semia df_Z['err_a'] = err_semia df_Z[Conc] = conc return df_Z def 
obtendo_dados(img,param): '''função para leitura do arquivo fits, criando um dataframe com os dados''' df = pd.DataFrame() nrows, ncols = img.shape xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] ) table = np.column_stack(( xx.flatten(), yy.flatten(), img.flatten() )) temp = pd.DataFrame(table, columns=['x','y',param]) df = pd.concat([df,temp], axis=1) return(df) def plots(df,param1,param2,param3,diretorio): '''Função para plotar os gráficos''' plt.figure() incr = param3*(df.ix[:,0].mean()) plt.xlim([(df.ix[:,0].min()-(incr)),(df.ix[:,0].max()+(incr))]) plt.scatter(df.ix[:,0], df.ix[:,12]) plt.title(gal+' '+tipo, fontsize=30) plt.ylabel('Concentraction', fontsize=30) plt.xlabel(param2, fontsize=30) plt.savefig('figures/%s/gal_%s_concentration_%s' %(diretorio,param1,param2)) plt.close() # plt.figure() # plt.title('Distribuicao C(%s)- %s' %(param2,param1)) # df.ix[:,0].hist(bins=100) # plt.savefig('figures/%s/gal%s_hist_%s' %(diretorio,param1,param2)) # plt.close() data_dir = '/home/pnovais/Dropbox/DOUTORADO/renew' age = pd.read_csv('Paty_at_flux__yx/age.csv') mass = pd.read_csv('PatImages/mass.csv') halpha = pd.read_csv('Hamaps/halpha.csv') #halpha = pd.read_csv('Hamaps/teste.csv') hu1 = [] hu2 = [] hu3 = [] hu4 = [] hu5 = [] hu6 = [] hu7 = [] hugal = [] hutype = [] df_hu = pd.DataFrame() for i_gal in range(len(halpha)): #for i_gal in range(0,2): print(bcolors.FAIL +'-'*79+ bcolors.EN
] = hutype
df_hu['hu1'] = hu1
df_hu['hu2'] = hu2
df_hu['hu3'] = hu3
df_hu['hu4'] = hu4
df_hu['hu5'] = hu5
df_hu['hu6'] = hu6
df_hu['hu7'] = hu7
#df_hu['hu1','hu2','hu3','hu4','hu5','hu6','hu7'] = hu1
df_hu.to_csv('hu_moments_gal.csv', index=False)

fim = time.time()
time_proc = fim - ini
print('')
#print(bcolors.FAIL +'-'*79+ bcolors.ENDC)
print(bcolors.OKBLUE + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
DC) print(bcolors.FAIL + '-'*33 + 'OBJETO: %s' %halpha['num_gal'][i_gal] + '-'*33 + bcolors.ENDC) print(bcolors.FAIL +'-'*79+ bcolors.ENDC) plt.close() image_ha = fits.open('Hamaps/%s_%s_Ha.fits' %(halpha['num_gal'][i_gal],halpha['type'][i_gal])) img = get_image(image_ha) #plotando a imagem fits plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold') plt.ylabel('Y',fontweight='bold') imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps - Galaxy %s ' %halpha['num_gal'][i_gal] plt.title(titulo) #plt.colorbar() figura = 'figures/imagens_Ha/galaxy_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) #obtendo os dados de Halpha da imagem fits df_ha = obtendo_dados(img,'halpha') #obtendo os dados de densidade de massa da imagem fits image_mass = fits.open('PatImages/PatImagesMcorSD__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_mass) df_mass = obtendo_dados(img, 'mass') #obtendo os dados de idade da imagem fits image_age = fits.open('Paty_at_flux__yx/at_flux__yx_%s.fits' %halpha['num_gal'][i_gal]) img = get_image(image_age) df_age = obtendo_dados(img, 'age') #selecionando apenas os dados de idade > 0 e mass > 0 df0 = pd.merge(df_age,df_mass) df1 = pd.merge(df0,df_ha, how='inner') df = df1[(df1.age > 0.0) & (df1.mass > 0.0) & (df1.halpha > 0.0)] Re = mom.equivalent_radius(df) cx, cy = mom.centro_mass(df) tetha, exc, a, b = mom.param_elipse(df) df['raio'] = np.sqrt((df['x'] - cx)**2 + (df['y'] - cy)**2) acres = math.radians(180) d = ((df['x'] - cx)*np.cos(tetha) + (df['y'] - cy)*np.sin(-tetha+acres))**2 e = ((df['x'] - cx)*np.sin(tetha) + (df['y'] - cy)*np.cos(-tetha+acres))**2 df['a'] = np.sqrt(d + e/((1-exc)**2)) gal = halpha['num_gal'][i_gal] tipo = halpha['type'][i_gal] age_test = Z(df,gal,'conc_age','age') mass_test = Z(df,gal,'conc_mass','mass') ha_test = Z(df,gal,'conc_ha','halpha') raio_test = Z(df,gal,'conc_raio', 'raio') a_test = Z(df,gal, 'conc_a', 'a') plots(age_test,gal,'Age',0,'concentracao') plots(mass_test,gal,'Mass_density',1,'concentracao') plots(ha_test,gal,'Halpha',1,'concentracao') #perfis circulares plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(raio_test.raio_m/Re, raio_test.age_m, yerr=raio_test.err_age, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(raio_test.halpha_m.min()-(raio_test.err_halpha.max() + 1e-17)), (raio_test.halpha_m.max()+(raio_test.err_halpha.max() + 1e-17))]) ax2.errorbar(raio_test.raio_m/Re, raio_test.halpha_m, yerr=raio_test.err_halpha, fmt='o') plt.plot(raio_test.raio_m/Re, raio_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel(r'Mean $H\alpha$') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(raio_test.raio_m/Re, raio_test.mass_m, yerr=raio_test.err_mass, fmt='.') plt.plot(raio_test.raio_m/Re, raio_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Radius/Re') plt.savefig('figures/perfis_circular/gal%s_perfis_circ' %(gal)) plt.close(1) plt.figure() plt.scatter(raio_test.raio_m/Re, raio_test.conc_raio) plt.plot(raio_test.raio_m/Re, raio_test.conc_raio, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Raio/Re') plt.savefig('figures/perfis_circular/gal%s_perfil_concentracao_circ' %(gal)) plt.close() #perfis elipticos 
plt.figure(1) plt.title(gal) ax1 = plt.subplot(311) plt.title('%s - %s' %(gal, tipo)) ax1.errorbar(a_test.a_m/Re, a_test.age_m, yerr=a_test.err_age, fmt='o') plt.scatter(a_test.a_m/Re, a_test.age_m) plt.plot(a_test.a_m/Re, a_test.age_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Age') plt.setp(ax1.get_xticklabels(), visible=False) ax2 = plt.subplot(312, sharex=ax1) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) ax2.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha') plt.setp(ax2.get_xticklabels(), visible=False) ax3 = plt.subplot(313, sharex=ax2) ax3.errorbar(a_test.a_m/Re, a_test.mass_m, yerr=a_test.err_mass, fmt='.') plt.plot(a_test.a_m/Re, a_test.mass_m, color='#7e2601',linewidth=1) plt.ylabel('Mean mass density') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfis_elip' %(gal)) plt.close(1) plt.figure() plt.scatter(a_test.a_m/Re, a_test.conc_a) plt.plot(a_test.a_m/Re, a_test.conc_a, color='#7e2601',linewidth=1) plt.title(gal) plt.ylabel('Concentraction') plt.xlabel('Semi-eixo a/Re') plt.savefig('figures/perfis_eliptico/gal%s_perfil_concentracao_elip' %(gal)) plt.close() mean = [cx,cy] width = 2*a height = 2*b angle = math.degrees(tetha) ell = mpl.patches.Ellipse(xy=mean, width=width, height=height, angle = 180+angle, alpha=0.2, color='black') fig, ax = plt.subplots() ax.add_patch(ell) ax.autoscale() df2 = df.ix[(df.a > a/3) & (df.a < (a/3 + 2))] df3 = df.ix[(df.a > a/2) & (df.a < (a/2 + 2))] df4 = df.ix[(df.a > a) & (df.a < (a + 2))] plt.scatter(df.x,df.y, c='red', s=10, alpha=0.7) plt.scatter(df2.x,df2.y, c='blue') plt.scatter(df3.x,df3.y, c='purple') plt.scatter(df4.x, df4.y, c='green') plt.savefig('figures/ajuste_elipse/gal%s_elipses' %(gal)) plt.close() print('excentricidade = %f' %exc) print('inclinacao = %f' %(math.degrees(tetha))) print('#%d' %i_gal) hu = mom.hu_moments(df) hu1.append(hu[0]) hu2.append(hu[1]) hu3.append(hu[2]) hu4.append(hu[3]) hu5.append(hu[4]) hu6.append(hu[5]) hu7.append(hu[6]) hugal.append(gal) hutype.append(tipo) #graficos apenas de Halpha plt.figure(1) #Imagem em Ha plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) plt.axis([0,77,0,72]) plt.xlabel('X',fontweight='bold', fontsize=30) plt.ylabel('Y',fontweight='bold', fontsize=30) imgplot = plt.imshow(100*np.log10(img/255), cmap=cx) titulo='Halpha Maps %s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo, fontsize=30) #plt.colorbar() figura = 'figures/Ha_analysis/gal_%s' %halpha['num_gal'][i_gal] plt.savefig(figura) plots(ha_test,gal,'Halpha',1,'Ha_analysis') #C(Z) plt.figure() #Z(a) plt.ylim([(a_test.halpha_m.min()-(a_test.err_halpha.max() + 1e-17)), (a_test.halpha_m.max()+(a_test.err_halpha.max() + 1e-17))]) plt.errorbar(a_test.a_m/Re, a_test.halpha_m, yerr=a_test.err_halpha, fmt='o') plt.plot(a_test.a_m/Re, a_test.halpha_m, color='#7e2601',linewidth=1) plt.ylabel('Mean Halpha', fontsize=30) plt.xlabel('Semi-eixo a/Re', fontsize=30) titulo2='%s %s' %(halpha['num_gal'][i_gal],tipo) plt.title(titulo2, fontsize=30) plt.savefig('figures/Ha_analysis/gal_%s_perfis_elip' %(gal)) df_hu['gal'] = hugal df_hu['tipo'
conditional_block
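This sample also shows the elliptical-profile step of the main loop: each pixel gets a semi-major-axis coordinate from the rotated offsets d and e plus the eccentricity returned by the author's mom.param_elipse module. A sketch of that formula alone; treating exc as the ellipticity 1 - b/a (implied by the division by (1 - exc)**2) is our reading, not something the script states:

import numpy as np

def elliptical_radius(x, y, cx, cy, theta, exc):
    """Semi-major-axis coordinate of pixels on concentric ellipses.

    Mirrors the d/e construction in the main loop: offsets rotated by the
    position angle theta (radians), cross-axis term stretched by (1 - exc),
    which we assume plays the role of the axis ratio b/a.
    """
    d = (x - cx) * np.cos(theta) + (y - cy) * np.sin(-theta + np.pi)
    e = (x - cx) * np.sin(theta) + (y - cy) * np.cos(-theta + np.pi)
    return np.sqrt(d**2 + e**2 / (1.0 - exc)**2)

# usage on a toy pixel grid
yy, xx = np.indices((8, 8))
print(elliptical_radius(xx, yy, cx=4.0, cy=4.0, theta=0.3, exc=0.4).round(2))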
modbase.py
#!/usr/bin/env python # :noTabs=true: """ remotely download from ModBase internal method "download_models_from_modbase" is the real winner other methods are for reloading afterwards (if necessary) so...some quirks, there seem to be multiple "user levels" including a distinction between "public" and "academic"...this seems odd to me so for now just looking at the default (public?) interface... or academic is default? reference the modeller.py script for scoring info...sorta... or nope there may be other data that is extractable from ModBase...however this is a bit obfuscated, for now, just focus on downloading the models and alignments todo: rework: download, then extract! add in quality features... Author: Evan H. Baugh """ ################################################################################ # IMPORT # common modules import os import urllib2 from xml.dom.minidom import parse as xml_parse import shutil import optparse # bigger modules # custom modules #from helper import create_directory , copy_file # place these simple helper scripts here instead, stand alone! ################################################################################ # HELPER METHODS # not very interesting, just ignore these # here because I like defining my own ways for these utilities which are in # another module...but you only care about ModBase stuff right? so # just put a copy here, that simple... # helper for creating a directory, checks and delets existing name def create_directory( dir_name , tagline = ' to sort the data' ): """ Creates the directory <dir_name> WARNING: this will delete the directory and its contents if it already exists! Optionally output something special in <tagline> """ # check if it exists print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline if os.path.isdir( dir_name ): print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...' shutil.rmtree( dir_name ) os.mkdir( dir_name ) # copy helper def copy_file( filename , destination , display = False ): """ Copy <filename> to/into <destination> just a cp wrapper...what? """ if display: # optional if os.path.isdir( destination ): print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory' elif os.path.isfile( destination ): print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination ) shutil.copy( filename , destination ) ################################################################################ # METHODS # woohoo! def download_models_from_modbase( query , out_directory = 'modbase_models' , root_filename = '' , dataset = '' , get_alignment = True , write_summary = True , display = True ): """ REQUIRES INTERNET CONNECTION Returns "details" on the models for <query> in ModBase write results to <out_directory> with the base <root_filename> Optionally request models from a specific <dataset> Optionally <get_alingment> too (as PIR file) Optionally <display> a summary of the results Optionally <write_summary> of the models (human readable, also displays) ModBase documentation claims that the interface can accept: databaseID database ID, let's use UniProt dataset a particular ModBase run? modelID same? seqID same? dataset the ModWeb JobID... type "model" or "alignment", this method handles this and that any of the first 4 is enough to identify the target (?) 
...for simplicity, let's just look using UniProt IDs as "databaseIDs" apparently to use "non-public" access additional work must be done (something about a "cookies.txt" file, though this seems specific to "wget", may be able to pass in user/password as "modbase_user" and "modbase_passwd") uses xml.dom.minidom to parse the HTML returned...this may not be kosher... but it works...and is easier than using htmllib or sgmllib...(?) """ # url url = 'http://salilab.org/modbase/retrieve/modbase' # format the search query print 'searching modbase for \"' + query +'\"' url += '?databaseID=' + query # currently unused...so why put it here? #for i in search_options.keys(): # url += '&' + i +'='+ search_options[i] # the dataset # if not 'dataset' in search_options.keys() and dataset: if dataset: url += '&dataset=' + dataset # go get the results print 'obtaining model results from:\n\t' + url raw_stream = urllib2.urlopen( url + '&type=model' ) print 'finished downloading models, summarizing the results...' # parse the results results = xml_parse( raw_stream ) # check if empty if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!! print 'no models exist in ModBase for this protein...' return {} # get the ids #ids = get_str_from_xml_tag( results , 'model_id' ) # no need, in the header of the model # get the models models = get_str_from_xml_tag( results , 'content' ) # extract the details details , text = get_modbase_model_details( models , display or write_summary , export = True ) # defaults for writing files if not root_filename: root_filename = 'modbase_' + query # optionally write the models if out_directory: create_directory( out_directory , ' to store the models as PDB files' ) print 'writing the downloaded models to ' + out_directory count = 1 filenames = [] for i in models: # write it filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb' filenames.append( os.path.abspath( filename ) ) # write the alignment f = open( filename , 'w' ) f.write( i.strip() ) f.close() count += 1 # change this in this case models = filenames # SOOO HACKY!!!! # for later safety... out_directory += '/' # optionally grab the alignment too if get_alignment: print 'also downloading the alignments...' raw_aln_stream = urllib2.urlopen( url + '&type=alignment' ) # parse the results aln_results = xml_parse( raw_aln_stream ) # get the files aln_results = aln_results.getElementsByTagName( 'alignmentfile' ) # ...for now, just get the text itself # don't worry about the other details in the XML file print 'writing the alignments as PIR files...' count = 1 for i in aln_results: i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir' f = open( filename , 'w' ) f.write( i ) f.close() # convert them? # doesn't seem to load these "pir" files...? :( # save in the details? details[count - 1]['alignment'] = i count += 1 # put the models (filenames) into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] # find the "best" model temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. 
target length' print temp text += temp +'\n' best_score = max( [i['sequence identity'] for i in details] ) matches = [i for i in details if i['sequence identity'] == best_score] if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ): # find the best model score best_score = max( [i['model score'] for i in details] ) matches = [i for i in details if i['model score'] == best_score] if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ):
# debug output if len( matches ) > 1: temp = 'multiple models are \"equally the best\":' print temp text += temp +'\n' for i in matches: temp = '\t'+ i['coordinates'] print temp text += temp +'\n' temp = 'copying the first on to best_model.pdb' print temp text += temp +'\n' else: temp = 'best model: ' + matches[0]['coordinates'] print temp text += temp # move it to a indicative filename copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' ) # optionally write a summary file if write_summary: # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_summary.txt' f = open( filename , 'w' ) f.write( text ) f.close() # just the details, has everything else... return details # very hacky wrapper def get_str_from_xml_tag( xml_object , tag ): """ So...I don't have time to learn proper XML parsing with the Python "xml" library right now and this approach works...so yeah simply return a list of str for the target <tag> in <xml_object> """ # get it results = xml_object.getElementsByTagName( tag ) # convert to string L = len( tag ) results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results] return results # useful simple text parsing def extract_model_details_from_modbase_header( modbase_model_text ): """ Returns a dict of the model details from <modbase_model_text> this includes the PDB template, coverage details (always continuous), and alignment/modeling details """ # setup defaults, cleaner display details = { 'model' : '?' , 'organism' : '?' , 'experiment' : '?' , 'method' : '?' , 'program' : '?' , 'sequence identity' : 0 , 'model score' : 0 , 'evalue' : 0 , 'template' : '?' , 'template chain' : '?' , 'template coverage' : [] , 'target length record' : 0 , 'target coverage' : [] , 'template length' : 0 , 'target length' : 0 , 'ModPipe run' : '?' , 'modelID' : '?' , 'alignmentID' : '?' } # over the lines for i in modbase_model_text.split( '\n' ): if i[:4] == 'ATOM': # done! end of the header break elif i[:6] == 'HEADER': details['model'] = str( i.replace( 'HEADER' , '' ).strip() ) #elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless... elif i[:6] == 'SOURCE': details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() ) #elif i[:6] == 'AUTHOR': # don't care about authors for now... elif i[:10] == 'REMARK 220': i = str( i.replace( 'REMARK 220' , '' ).strip() ) # keep sorting... 
if i[:16] == 'EXPERIMENT TYPE:': details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize() elif i[:7] == 'METHOD:': details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize() elif i[:8] == 'PROGRAM:': details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() ) elif i[:18] == 'SEQUENCE IDENTITY:': # as fraction please details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100 elif i[:12] == 'MODEL SCORE:': # as float details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() ) elif i[:7] == 'EVALUE:': # as float details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() ) elif i[:13] == 'TEMPLATE PDB:': details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip().upper() ) elif i[:15] == 'TEMPLATE CHAIN:': details['template chain'] = str( i.replace( 'TEMPLATE CHAIN:' , '' ).strip() ) elif i[:15] == 'TEMPLATE BEGIN:': details['template coverage'].append( int( i.replace( 'TEMPLATE BEGIN:' , '' ).strip() ) ) elif i[:13] == 'TEMPLATE END:': details['template coverage'].append( int( i.replace( 'TEMPLATE END:' , '' ).strip() ) ) elif i[:14] == 'TARGET LENGTH:': details['target length record'] = int( i.replace( 'TARGET LENGTH:' , '' ).strip() ) elif i[:13] == 'TARGET BEGIN:': details['target coverage'].append( int( i.replace( 'TARGET BEGIN:' , '' ).strip() ) ) elif i[:11] == 'TARGET END:': details['target coverage'].append( int( i.replace( 'TARGET END:' , '' ).strip() ) ) elif i[:12] == 'MODPIPE RUN:': details['ModPipe run'] = str( i.replace( 'MODPIPE RUN:' , '' ).strip() ) elif i[:17] == 'MODPIPE MODEL ID:': details['modelID'] = str( i.replace( 'MODPIPE MODEL ID:' , '' ).strip() ) elif i[:21] == 'MODPIPE ALIGNMENT ID:': details['alignmentID'] = str( i.replace( 'MODPIPE ALIGNMENT ID:' , '' ).strip() ) # for own sanity details['template coverage'].sort() details['template length'] = details['template coverage'][1] - details['template coverage'][0] + 1 details['target coverage'].sort() details['target length'] = details['target coverage'][1] - details['target coverage'][0] + 1 return details # silly interactive method def display_modbase_model_details( details , include_run_details = False , export = False ): """ Displays a summary of the ModBase model <details> Optionally <include_run_details> Optionally <export> the summary text """ # check the input if isinstance( details , str ): # assume it just needs to be parsed out details = extract_model_details_from_modbase_header( details ) # exit condition if 'FAIL' in details.keys(): text = details['model'] +'\n' text += 'FAILED: ' + details['FAIL'] +'\n' print text[:-1] return text[:-1] # make the string text = details['model'] +'\n' text += 'covering: ' + str( details['target coverage'][0] ) +'-'+ str( details['target coverage'][1] ) +' ('+ str( details['target length'] ) + ' positions)\n' text += '\nfrom: ' + details['template'] +' '+ details['template chain'] +' (' + str( details['template coverage'][0] ) +'-'+ str( details['template coverage'][1] ) +') from ' + details['organism'] +'\n' text += '\nsequence identity: ' + str( details['sequence identity'] )[:6] + ' (evalue ' + str( details['evalue'] )[:6] +')\n' text += 'model score: ' + str( details['model score'] )[:6] +'\n' # optionally include the run details if include_run_details: text += '\n' text += details['experiment'] + ' by ' + details['method'] + ' using ' + details['program'] +'\n' text += 'from: ' + details['ModPipe run'] +'\n' print text[:-1] # optionally return the text if 
export: return text[:-1] # simple wrapper def get_modbase_model_details( models , add_model_numbers = True , display = True , export = False ): """ Returns the details of the model text <models> Optionally <display> the details Optionally <export> the details AND a summary str ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the details # add hacky error check here... permissive_models = [] for i in models: try: model = extract_model_details_from_modbase_header( i ) permissive_models.append( model ) except: print 'HEY! very rare bug found! ModBase model based on icode regions...not today friends...' permissive_models.append( { 'FAIL' : 'model indices contain and icode...this makes re-aligning VERY complicated...' , 'model' : '???' , 'sequence identity' : 0 , 'model score' : 0 , 'target length' : 0 , # more hacking...ugh 'target coverage' : [0 , 0] } ) #continue details = permissive_models # optionally add the model # into the name if add_model_numbers: for i in xrange( len( details ) ): details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] # make text for display/writing # optionally display the model summay...because of how this is setup... if display: text = '' for i in xrange( len( details ) ): temp = '='*80 print temp text += temp +'\n' text += display_modbase_model_details( details[i] , export = True ) +'\n' print # clearner... # optionally return the details and a text summary...hacky... if export: return details , text return details # messy...based on default naming... def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ): """ Returns the "expected" model filenames from <out_directory> downloaded from ModBase ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # defaults for written files if not root_filename: root_filename = 'modbase_' + query if not out_directory: out_directory = './' # here! # ta da! return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb'] # ugh... def extract_modbase_model_details_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' , display = True ): """ Returns the model details of ModBase models for <query> in <out_directory> Optionally specify the <root_filename> from when the files were written Optionally <display> the details (for interactive use) ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the models models = determine_modbase_models_from_modbase_directory( query , out_directory , root_filename ) # :( hacky...but must be sure # rather than an if... 
out_directory = out_directory.strip( '/' ) +'/' # load the text model_text = ['']*len( models ) for i in xrange( len( models ) ): f = open( out_directory + models[i] , 'r' ) model_text[i] = f.read() f.close() # extract the details details = get_modbase_model_details( model_text , display ) # put the models into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] aln_filename = out_directory + models[i][:-4] + '_alignment.pir' if os.path.exists( aln_filename ): f = open( aln_filename , 'r' ) details[i]['alignment'] = f.read() f.close() return details ################################################################################ # MAIN if __name__ == '__main__': # parser object for managing input options parser = optparse.OptionParser() # essential data parser.add_option( '-q' , dest = 'query' , default = '' , help = 'the (UniProt) ID to download from ModBase' ) parser.add_option( '-o' , dest = 'out_directory' , default = 'modbase_models' , help = 'the name of the directory to create, for storing the downloads' ) parser.add_option( '-r' , dest = 'root_filename' , default = '' , help = 'the \"root\" for naming the files, defaults to the query' ) parser.add_option( '-d' , dest = 'dataset' , default = '' , help = 'which ModBase dataset to download from' ) # boolean options, sry if this is confusing, default them all to True # so when you use them, you turn them off parser.add_option( '-a' , dest = 'get_alignment' , default = True , help = 'include the alignments per model/template? default=True' , action = 'store_false' ) parser.add_option( '-w' , dest = 'write_summary' , default = True , help = 'output the stdout summary of the models? default=True' , action = 'store_false' ) parser.add_option( '-v' , dest = 'display' , default = True , help = 'print out \"useful\" information while running? default=True' , action = 'store_false' ) (options,args) = parser.parse_args() # check inputs # no edits/modifications # kinda silly, but I do this as "my style", easy to modify cleanly query = options.query out_directory = options.out_directory root_filename = options.root_filename dataset = options.dataset get_alignment = options.get_alignment write_summary = options.write_summary display = options.display # choose the default method to run download_models_from_modbase( query , out_directory = out_directory , root_filename = root_filename , dataset = dataset , get_alignment = get_alignment , write_summary = write_summary , display = display )
best_score = max( [i['target length'] for i in details] )
matches = [i for i in details if i['target length'] == best_score]
conditional_block
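The REMARK 220 parsing above dispatches on hard-coded prefix lengths (i[:16], i[:7], ...), which is fragile if a field name ever changes length. Below is a minimal table-driven sketch of the same dispatch; parse_remark_220 is a hypothetical helper, not part of the script, and the field names and converters are the ones the chain above already uses (identity stored as a fraction, scores as floats).

# hypothetical helper: table-driven version of the prefix dispatch above
def parse_remark_220( line , details ):
    """ Fold one stripped 'REMARK 220' payload <line> into <details> """
    fields = [ ( 'SEQUENCE IDENTITY:' , 'sequence identity' , lambda s: float( s )/100 ) ,
               ( 'MODEL SCORE:' , 'model score' , float ) ,
               ( 'EVALUE:' , 'evalue' , float ) ,
               ( 'TEMPLATE PDB:' , 'template' , lambda s: str( s ).upper() ) ]
    for prefix , key , convert in fields:
        if line.startswith( prefix ):
            # startswith avoids counting prefix characters by hand
            details[key] = convert( line.replace( prefix , '' ).strip() )
            return True
    return False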
modbase.py
#!/usr/bin/env python # :noTabs=true: """ remotely download from ModBase internal method "download_models_from_modbase" is the real winner other methods are for reloading afterwards (if necessary) so...some quirks, there seem to be multiple "user levels" including a distinction between "public" and "academic"...this seems odd to me so for now just looking at the default (public?) interface... or academic is default? reference the modeller.py script for scoring info...sorta... or nope there may be other data that is extractable from ModBase...however this is a bit obfuscated, for now, just focus on downloading the models and alignments todo: rework: download, then extract! add in quality features... Author: Evan H. Baugh """ ################################################################################ # IMPORT # common modules import os import urllib2 from xml.dom.minidom import parse as xml_parse import shutil import optparse # bigger modules # custom modules #from helper import create_directory , copy_file # place these simple helper scripts here instead, stand alone! ################################################################################ # HELPER METHODS # not very interesting, just ignore these # here because I like defining my own ways for these utilities which are in # another module...but you only care about ModBase stuff right? so # just put a copy here, that simple... # helper for creating a directory, checks and delets existing name def
( dir_name , tagline = ' to sort the data' ): """ Creates the directory <dir_name> WARNING: this will delete the directory and its contents if it already exists! Optionally output something special in <tagline> """ # check if it exists print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline if os.path.isdir( dir_name ): print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...' shutil.rmtree( dir_name ) os.mkdir( dir_name ) # copy helper def copy_file( filename , destination , display = False ): """ Copy <filename> to/into <destination> just a cp wrapper...what? """ if display: # optional if os.path.isdir( destination ): print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory' elif os.path.isfile( destination ): print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination ) shutil.copy( filename , destination ) ################################################################################ # METHODS # woohoo! def download_models_from_modbase( query , out_directory = 'modbase_models' , root_filename = '' , dataset = '' , get_alignment = True , write_summary = True , display = True ): """ REQUIRES INTERNET CONNECTION Returns "details" on the models for <query> in ModBase write results to <out_directory> with the base <root_filename> Optionally request models from a specific <dataset> Optionally <get_alingment> too (as PIR file) Optionally <display> a summary of the results Optionally <write_summary> of the models (human readable, also displays) ModBase documentation claims that the interface can accept: databaseID database ID, let's use UniProt dataset a particular ModBase run? modelID same? seqID same? dataset the ModWeb JobID... type "model" or "alignment", this method handles this and that any of the first 4 is enough to identify the target (?) ...for simplicity, let's just look using UniProt IDs as "databaseIDs" apparently to use "non-public" access additional work must be done (something about a "cookies.txt" file, though this seems specific to "wget", may be able to pass in user/password as "modbase_user" and "modbase_passwd") uses xml.dom.minidom to parse the HTML returned...this may not be kosher... but it works...and is easier than using htmllib or sgmllib...(?) """ # url url = 'http://salilab.org/modbase/retrieve/modbase' # format the search query print 'searching modbase for \"' + query +'\"' url += '?databaseID=' + query # currently unused...so why put it here? #for i in search_options.keys(): # url += '&' + i +'='+ search_options[i] # the dataset # if not 'dataset' in search_options.keys() and dataset: if dataset: url += '&dataset=' + dataset # go get the results print 'obtaining model results from:\n\t' + url raw_stream = urllib2.urlopen( url + '&type=model' ) print 'finished downloading models, summarizing the results...' # parse the results results = xml_parse( raw_stream ) # check if empty if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!! print 'no models exist in ModBase for this protein...' 
return {} # get the ids #ids = get_str_from_xml_tag( results , 'model_id' ) # no need, in the header of the model # get the models models = get_str_from_xml_tag( results , 'content' ) # extract the details details , text = get_modbase_model_details( models , display or write_summary , export = True ) # defaults for writing files if not root_filename: root_filename = 'modbase_' + query # optionally write the models if out_directory: create_directory( out_directory , ' to store the models as PDB files' ) print 'writing the downloaded models to ' + out_directory count = 1 filenames = [] for i in models: # write it filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb' filenames.append( os.path.abspath( filename ) ) # write the alignment f = open( filename , 'w' ) f.write( i.strip() ) f.close() count += 1 # change this in this case models = filenames # SOOO HACKY!!!! # for later safety... out_directory += '/' # optionally grab the alignment too if get_alignment: print 'also downloading the alignments...' raw_aln_stream = urllib2.urlopen( url + '&type=alignment' ) # parse the results aln_results = xml_parse( raw_aln_stream ) # get the files aln_results = aln_results.getElementsByTagName( 'alignmentfile' ) # ...for now, just get the text itself # don't worry about the other details in the XML file print 'writing the alignments as PIR files...' count = 1 for i in aln_results: i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir' f = open( filename , 'w' ) f.write( i ) f.close() # convert them? # doesn't seem to load these "pir" files...? :( # save in the details? details[count - 1]['alignment'] = i count += 1 # put the models (filenames) into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] # find the "best" model temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. target length' print temp text += temp +'\n' best_score = max( [i['sequence identity'] for i in details] ) matches = [i for i in details if i['sequence identity'] == best_score] if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ): # find the best model score best_score = max( [i['model score'] for i in details] ) matches = [i for i in details if i['model score'] == best_score] if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ): best_score = max( [i['target length'] for i in details] ) matches = [i for i in details if i['target length'] == best_score] # debug output if len( matches ) > 1: temp = 'multiple models are \"equally the best\":' print temp text += temp +'\n' for i in matches: temp = '\t'+ i['coordinates'] print temp text += temp +'\n' temp = 'copying the first on to best_model.pdb' print temp text += temp +'\n' else: temp = 'best model: ' + matches[0]['coordinates'] print temp text += temp # move it to a indicative filename copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' ) # optionally write a summary file if write_summary: # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_summary.txt' f = open( filename , 'w' ) f.write( text ) f.close() # just the details, has everything else... 
return details # very hacky wrapper def get_str_from_xml_tag( xml_object , tag ): """ So...I don't have time to learn proper XML parsing with the Python "xml" library right now and this approach works...so yeah simply return a list of str for the target <tag> in <xml_object> """ # get it results = xml_object.getElementsByTagName( tag ) # convert to string L = len( tag ) results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results] return results # useful simple text parsing def extract_model_details_from_modbase_header( modbase_model_text ): """ Returns a dict of the model details from <modbase_model_text> this includes the PDB template, coverage details (always continuous), and alignment/modeling details """ # setup defaults, cleaner display details = { 'model' : '?' , 'organism' : '?' , 'experiment' : '?' , 'method' : '?' , 'program' : '?' , 'sequence identity' : 0 , 'model score' : 0 , 'evalue' : 0 , 'template' : '?' , 'template chain' : '?' , 'template coverage' : [] , 'target length record' : 0 , 'target coverage' : [] , 'template length' : 0 , 'target length' : 0 , 'ModPipe run' : '?' , 'modelID' : '?' , 'alignmentID' : '?' } # over the lines for i in modbase_model_text.split( '\n' ): if i[:4] == 'ATOM': # done! end of the header break elif i[:6] == 'HEADER': details['model'] = str( i.replace( 'HEADER' , '' ).strip() ) #elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless... elif i[:6] == 'SOURCE': details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() ) #elif i[:6] == 'AUTHOR': # don't care about authors for now... elif i[:10] == 'REMARK 220': i = str( i.replace( 'REMARK 220' , '' ).strip() ) # keep sorting... if i[:16] == 'EXPERIMENT TYPE:': details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize() elif i[:7] == 'METHOD:': details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize() elif i[:8] == 'PROGRAM:': details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() ) elif i[:18] == 'SEQUENCE IDENTITY:': # as fraction please details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100 elif i[:12] == 'MODEL SCORE:': # as float details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() ) elif i[:7] == 'EVALUE:': # as float details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() ) elif i[:13] == 'TEMPLATE PDB:': details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip().upper() ) elif i[:15] == 'TEMPLATE CHAIN:': details['template chain'] = str( i.replace( 'TEMPLATE CHAIN:' , '' ).strip() ) elif i[:15] == 'TEMPLATE BEGIN:': details['template coverage'].append( int( i.replace( 'TEMPLATE BEGIN:' , '' ).strip() ) ) elif i[:13] == 'TEMPLATE END:': details['template coverage'].append( int( i.replace( 'TEMPLATE END:' , '' ).strip() ) ) elif i[:14] == 'TARGET LENGTH:': details['target length record'] = int( i.replace( 'TARGET LENGTH:' , '' ).strip() ) elif i[:13] == 'TARGET BEGIN:': details['target coverage'].append( int( i.replace( 'TARGET BEGIN:' , '' ).strip() ) ) elif i[:11] == 'TARGET END:': details['target coverage'].append( int( i.replace( 'TARGET END:' , '' ).strip() ) ) elif i[:12] == 'MODPIPE RUN:': details['ModPipe run'] = str( i.replace( 'MODPIPE RUN:' , '' ).strip() ) elif i[:17] == 'MODPIPE MODEL ID:': details['modelID'] = str( i.replace( 'MODPIPE MODEL ID:' , '' ).strip() ) elif i[:21] == 'MODPIPE ALIGNMENT ID:': details['alignmentID'] = str( i.replace( 'MODPIPE ALIGNMENT ID:' , '' ).strip() ) # for own sanity details['template 
coverage'].sort() details['template length'] = details['template coverage'][1] - details['template coverage'][0] + 1 details['target coverage'].sort() details['target length'] = details['target coverage'][1] - details['target coverage'][0] + 1 return details # silly interactive method def display_modbase_model_details( details , include_run_details = False , export = False ): """ Displays a summary of the ModBase model <details> Optionally <include_run_details> Optionally <export> the summary text """ # check the input if isinstance( details , str ): # assume it just needs to be parsed out details = extract_model_details_from_modbase_header( details ) # exit condition if 'FAIL' in details.keys(): text = details['model'] +'\n' text += 'FAILED: ' + details['FAIL'] +'\n' print text[:-1] return text[:-1] # make the string text = details['model'] +'\n' text += 'covering: ' + str( details['target coverage'][0] ) +'-'+ str( details['target coverage'][1] ) +' ('+ str( details['target length'] ) + ' positions)\n' text += '\nfrom: ' + details['template'] +' '+ details['template chain'] +' (' + str( details['template coverage'][0] ) +'-'+ str( details['template coverage'][1] ) +') from ' + details['organism'] +'\n' text += '\nsequence identity: ' + str( details['sequence identity'] )[:6] + ' (evalue ' + str( details['evalue'] )[:6] +')\n' text += 'model score: ' + str( details['model score'] )[:6] +'\n' # optionally include the run details if include_run_details: text += '\n' text += details['experiment'] + ' by ' + details['method'] + ' using ' + details['program'] +'\n' text += 'from: ' + details['ModPipe run'] +'\n' print text[:-1] # optionally return the text if export: return text[:-1] # simple wrapper def get_modbase_model_details( models , add_model_numbers = True , display = True , export = False ): """ Returns the details of the model text <models> Optionally <display> the details Optionally <export> the details AND a summary str ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the details # add hacky error check here... permissive_models = [] for i in models: try: model = extract_model_details_from_modbase_header( i ) permissive_models.append( model ) except: print 'HEY! very rare bug found! ModBase model based on icode regions...not today friends...' permissive_models.append( { 'FAIL' : 'model indices contain and icode...this makes re-aligning VERY complicated...' , 'model' : '???' , 'sequence identity' : 0 , 'model score' : 0 , 'target length' : 0 , # more hacking...ugh 'target coverage' : [0 , 0] } ) #continue details = permissive_models # optionally add the model # into the name if add_model_numbers: for i in xrange( len( details ) ): details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] # make text for display/writing # optionally display the model summay...because of how this is setup... if display: text = '' for i in xrange( len( details ) ): temp = '='*80 print temp text += temp +'\n' text += display_modbase_model_details( details[i] , export = True ) +'\n' print # clearner... # optionally return the details and a text summary...hacky... if export: return details , text return details # messy...based on default naming... 
def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ): """ Returns the "expected" model filenames from <out_directory> downloaded from ModBase ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # defaults for written files if not root_filename: root_filename = 'modbase_' + query if not out_directory: out_directory = './' # here! # ta da! return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb'] # ugh... def extract_modbase_model_details_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' , display = True ): """ Returns the model details of ModBase models for <query> in <out_directory> Optionally specify the <root_filename> from when the files were written Optionally <display> the details (for interactive use) ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the models models = determine_modbase_models_from_modbase_directory( query , out_directory , root_filename ) # :( hacky...but must be sure # rather than an if... out_directory = out_directory.strip( '/' ) +'/' # load the text model_text = ['']*len( models ) for i in xrange( len( models ) ): f = open( out_directory + models[i] , 'r' ) model_text[i] = f.read() f.close() # extract the details details = get_modbase_model_details( model_text , display ) # put the models into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] aln_filename = out_directory + models[i][:-4] + '_alignment.pir' if os.path.exists( aln_filename ): f = open( aln_filename , 'r' ) details[i]['alignment'] = f.read() f.close() return details ################################################################################ # MAIN if __name__ == '__main__': # parser object for managing input options parser = optparse.OptionParser() # essential data parser.add_option( '-q' , dest = 'query' , default = '' , help = 'the (UniProt) ID to download from ModBase' ) parser.add_option( '-o' , dest = 'out_directory' , default = 'modbase_models' , help = 'the name of the directory to create, for storing the downloads' ) parser.add_option( '-r' , dest = 'root_filename' , default = '' , help = 'the \"root\" for naming the files, defaults to the query' ) parser.add_option( '-d' , dest = 'dataset' , default = '' , help = 'which ModBase dataset to download from' ) # boolean options, sry if this is confusing, default them all to True # so when you use them, you turn them off parser.add_option( '-a' , dest = 'get_alignment' , default = True , help = 'include the alignments per model/template? default=True' , action = 'store_false' ) parser.add_option( '-w' , dest = 'write_summary' , default = True , help = 'output the stdout summary of the models? default=True' , action = 'store_false' ) parser.add_option( '-v' , dest = 'display' , default = True , help = 'print out \"useful\" information while running? 
default=True' , action = 'store_false' ) (options,args) = parser.parse_args() # check inputs # no edits/modifications # kinda silly, but I do this as "my style", easy to modify cleanly query = options.query out_directory = options.out_directory root_filename = options.root_filename dataset = options.dataset get_alignment = options.get_alignment write_summary = options.write_summary display = options.display # choose the default method to run download_models_from_modbase( query , out_directory = out_directory , root_filename = root_filename , dataset = dataset , get_alignment = get_alignment , write_summary = write_summary , display = display )
create_directory
identifier_name
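For reference, the whole download above reduces to one HTTP GET against the retrieve endpoint. This sketch isolates just that call; the endpoint and the databaseID/dataset/type parameters come from download_models_from_modbase itself, urllib2 matches the script's Python 2 imports, and the UniProt ID in the usage comment is a made-up example.

import urllib2

def fetch_modbase_xml( query , kind = 'model' , dataset = '' ):
    """ Return the raw ModBase XML for <query>; <kind> is 'model' or 'alignment' """
    url = 'http://salilab.org/modbase/retrieve/modbase?databaseID=' + query
    if dataset:
        url += '&dataset=' + dataset
    # one GET per type; the script makes this request twice, once per <kind>
    return urllib2.urlopen( url + '&type=' + kind ).read()

# usage, with a hypothetical UniProt ID:
#model_xml = fetch_modbase_xml( 'P38398' )
#alignment_xml = fetch_modbase_xml( 'P38398' , kind = 'alignment' )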
modbase.py
#!/usr/bin/env python # :noTabs=true: """ remotely download from ModBase internal method "download_models_from_modbase" is the real winner other methods are for reloading afterwards (if necessary) so...some quirks, there seem to be multiple "user levels" including a distinction between "public" and "academic"...this seems odd to me so for now just looking at the default (public?) interface... or academic is default? reference the modeller.py script for scoring info...sorta... or nope there may be other data that is extractable from ModBase...however this is a bit obfuscated, for now, just focus on downloading the models and alignments todo: rework: download, then extract! add in quality features... Author: Evan H. Baugh """ ################################################################################ # IMPORT # common modules import os import urllib2 from xml.dom.minidom import parse as xml_parse import shutil import optparse # bigger modules # custom modules #from helper import create_directory , copy_file # place these simple helper scripts here instead, stand alone! ################################################################################ # HELPER METHODS # not very interesting, just ignore these # here because I like defining my own ways for these utilities which are in # another module...but you only care about ModBase stuff right? so # just put a copy here, that simple... # helper for creating a directory, checks and delets existing name def create_directory( dir_name , tagline = ' to sort the data' ): """ Creates the directory <dir_name> WARNING: this will delete the directory and its contents if it already exists! Optionally output something special in <tagline> """ # check if it exists print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline if os.path.isdir( dir_name ): print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...' shutil.rmtree( dir_name ) os.mkdir( dir_name ) # copy helper def copy_file( filename , destination , display = False ): """ Copy <filename> to/into <destination> just a cp wrapper...what? """ if display: # optional if os.path.isdir( destination ): print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory' elif os.path.isfile( destination ): print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination ) shutil.copy( filename , destination ) ################################################################################ # METHODS # woohoo! def download_models_from_modbase( query , out_directory = 'modbase_models' , root_filename = '' , dataset = '' , get_alignment = True , write_summary = True , display = True ): """ REQUIRES INTERNET CONNECTION Returns "details" on the models for <query> in ModBase write results to <out_directory> with the base <root_filename> Optionally request models from a specific <dataset> Optionally <get_alingment> too (as PIR file) Optionally <display> a summary of the results Optionally <write_summary> of the models (human readable, also displays) ModBase documentation claims that the interface can accept: databaseID database ID, let's use UniProt dataset a particular ModBase run? modelID same? seqID same? dataset the ModWeb JobID... type "model" or "alignment", this method handles this and that any of the first 4 is enough to identify the target (?) 
...for simplicity, let's just look using UniProt IDs as "databaseIDs" apparently to use "non-public" access additional work must be done (something about a "cookies.txt" file, though this seems specific to "wget", may be able to pass in user/password as "modbase_user" and "modbase_passwd") uses xml.dom.minidom to parse the HTML returned...this may not be kosher... but it works...and is easier than using htmllib or sgmllib...(?) """ # url url = 'http://salilab.org/modbase/retrieve/modbase' # format the search query print 'searching modbase for \"' + query +'\"' url += '?databaseID=' + query # currently unused...so why put it here? #for i in search_options.keys(): # url += '&' + i +'='+ search_options[i] # the dataset # if not 'dataset' in search_options.keys() and dataset: if dataset: url += '&dataset=' + dataset # go get the results print 'obtaining model results from:\n\t' + url raw_stream = urllib2.urlopen( url + '&type=model' ) print 'finished downloading models, summarizing the results...' # parse the results results = xml_parse( raw_stream ) # check if empty if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!! print 'no models exist in ModBase for this protein...' return {} # get the ids #ids = get_str_from_xml_tag( results , 'model_id' ) # no need, in the header of the model # get the models models = get_str_from_xml_tag( results , 'content' ) # extract the details details , text = get_modbase_model_details( models , display or write_summary , export = True ) # defaults for writing files if not root_filename: root_filename = 'modbase_' + query # optionally write the models if out_directory: create_directory( out_directory , ' to store the models as PDB files' ) print 'writing the downloaded models to ' + out_directory count = 1 filenames = [] for i in models: # write it filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb' filenames.append( os.path.abspath( filename ) ) # write the alignment f = open( filename , 'w' ) f.write( i.strip() ) f.close() count += 1 # change this in this case models = filenames # SOOO HACKY!!!! # for later safety... out_directory += '/' # optionally grab the alignment too if get_alignment: print 'also downloading the alignments...' raw_aln_stream = urllib2.urlopen( url + '&type=alignment' ) # parse the results aln_results = xml_parse( raw_aln_stream ) # get the files aln_results = aln_results.getElementsByTagName( 'alignmentfile' ) # ...for now, just get the text itself # don't worry about the other details in the XML file print 'writing the alignments as PIR files...' count = 1 for i in aln_results: i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir' f = open( filename , 'w' ) f.write( i ) f.close() # convert them? # doesn't seem to load these "pir" files...? :( # save in the details? details[count - 1]['alignment'] = i count += 1 # put the models (filenames) into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] # find the "best" model temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. 
target length' print temp text += temp +'\n' best_score = max( [i['sequence identity'] for i in details] ) matches = [i for i in details if i['sequence identity'] == best_score] if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ): # find the best model score best_score = max( [i['model score'] for i in details] ) matches = [i for i in details if i['model score'] == best_score] if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ): best_score = max( [i['target length'] for i in details] ) matches = [i for i in details if i['target length'] == best_score] # debug output if len( matches ) > 1: temp = 'multiple models are \"equally the best\":' print temp text += temp +'\n' for i in matches: temp = '\t'+ i['coordinates'] print temp text += temp +'\n' temp = 'copying the first on to best_model.pdb' print temp text += temp +'\n' else: temp = 'best model: ' + matches[0]['coordinates'] print temp text += temp # move it to a indicative filename copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' ) # optionally write a summary file if write_summary: # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_summary.txt' f = open( filename , 'w' ) f.write( text ) f.close() # just the details, has everything else... return details # very hacky wrapper def get_str_from_xml_tag( xml_object , tag ): """ So...I don't have time to learn proper XML parsing with the Python "xml" library right now and this approach works...so yeah simply return a list of str for the target <tag> in <xml_object> """ # get it results = xml_object.getElementsByTagName( tag ) # convert to string L = len( tag ) results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results] return results # useful simple text parsing def extract_model_details_from_modbase_header( modbase_model_text ): """ Returns a dict of the model details from <modbase_model_text> this includes the PDB template, coverage details (always continuous), and alignment/modeling details """ # setup defaults, cleaner display details = { 'model' : '?' , 'organism' : '?' , 'experiment' : '?' , 'method' : '?' , 'program' : '?' , 'sequence identity' : 0 , 'model score' : 0 , 'evalue' : 0 , 'template' : '?' , 'template chain' : '?' , 'template coverage' : [] , 'target length record' : 0 , 'target coverage' : [] , 'template length' : 0 , 'target length' : 0 , 'ModPipe run' : '?' , 'modelID' : '?' , 'alignmentID' : '?' } # over the lines for i in modbase_model_text.split( '\n' ): if i[:4] == 'ATOM': # done! end of the header break elif i[:6] == 'HEADER': details['model'] = str( i.replace( 'HEADER' , '' ).strip() ) #elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless... elif i[:6] == 'SOURCE': details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() ) #elif i[:6] == 'AUTHOR': # don't care about authors for now... elif i[:10] == 'REMARK 220': i = str( i.replace( 'REMARK 220' , '' ).strip() ) # keep sorting... 
if i[:16] == 'EXPERIMENT TYPE:': details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize() elif i[:7] == 'METHOD:': details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize() elif i[:8] == 'PROGRAM:': details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() ) elif i[:18] == 'SEQUENCE IDENTITY:': # as fraction please details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100 elif i[:12] == 'MODEL SCORE:': # as float
# as float details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() ) elif i[:13] == 'TEMPLATE PDB:': details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip().upper() ) elif i[:15] == 'TEMPLATE CHAIN:': details['template chain'] = str( i.replace( 'TEMPLATE CHAIN:' , '' ).strip() ) elif i[:15] == 'TEMPLATE BEGIN:': details['template coverage'].append( int( i.replace( 'TEMPLATE BEGIN:' , '' ).strip() ) ) elif i[:13] == 'TEMPLATE END:': details['template coverage'].append( int( i.replace( 'TEMPLATE END:' , '' ).strip() ) ) elif i[:14] == 'TARGET LENGTH:': details['target length record'] = int( i.replace( 'TARGET LENGTH:' , '' ).strip() ) elif i[:13] == 'TARGET BEGIN:': details['target coverage'].append( int( i.replace( 'TARGET BEGIN:' , '' ).strip() ) ) elif i[:11] == 'TARGET END:': details['target coverage'].append( int( i.replace( 'TARGET END:' , '' ).strip() ) ) elif i[:12] == 'MODPIPE RUN:': details['ModPipe run'] = str( i.replace( 'MODPIPE RUN:' , '' ).strip() ) elif i[:17] == 'MODPIPE MODEL ID:': details['modelID'] = str( i.replace( 'MODPIPE MODEL ID:' , '' ).strip() ) elif i[:21] == 'MODPIPE ALIGNMENT ID:': details['alignmentID'] = str( i.replace( 'MODPIPE ALIGNMENT ID:' , '' ).strip() ) # for own sanity details['template coverage'].sort() details['template length'] = details['template coverage'][1] - details['template coverage'][0] + 1 details['target coverage'].sort() details['target length'] = details['target coverage'][1] - details['target coverage'][0] + 1 return details # silly interactive method def display_modbase_model_details( details , include_run_details = False , export = False ): """ Displays a summary of the ModBase model <details> Optionally <include_run_details> Optionally <export> the summary text """ # check the input if isinstance( details , str ): # assume it just needs to be parsed out details = extract_model_details_from_modbase_header( details ) # exit condition if 'FAIL' in details.keys(): text = details['model'] +'\n' text += 'FAILED: ' + details['FAIL'] +'\n' print text[:-1] return text[:-1] # make the string text = details['model'] +'\n' text += 'covering: ' + str( details['target coverage'][0] ) +'-'+ str( details['target coverage'][1] ) +' ('+ str( details['target length'] ) + ' positions)\n' text += '\nfrom: ' + details['template'] +' '+ details['template chain'] +' (' + str( details['template coverage'][0] ) +'-'+ str( details['template coverage'][1] ) +') from ' + details['organism'] +'\n' text += '\nsequence identity: ' + str( details['sequence identity'] )[:6] + ' (evalue ' + str( details['evalue'] )[:6] +')\n' text += 'model score: ' + str( details['model score'] )[:6] +'\n' # optionally include the run details if include_run_details: text += '\n' text += details['experiment'] + ' by ' + details['method'] + ' using ' + details['program'] +'\n' text += 'from: ' + details['ModPipe run'] +'\n' print text[:-1] # optionally return the text if export: return text[:-1] # simple wrapper def get_modbase_model_details( models , add_model_numbers = True , display = True , export = False ): """ Returns the details of the model text <models> Optionally <display> the details Optionally <export> the details AND a summary str ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the details # add hacky error check here... 
permissive_models = [] for i in models: try: model = extract_model_details_from_modbase_header( i ) permissive_models.append( model ) except: print 'HEY! very rare bug found! ModBase model based on icode regions...not today friends...' permissive_models.append( { 'FAIL' : 'model indices contain and icode...this makes re-aligning VERY complicated...' , 'model' : '???' , 'sequence identity' : 0 , 'model score' : 0 , 'target length' : 0 , # more hacking...ugh 'target coverage' : [0 , 0] } ) #continue details = permissive_models # optionally add the model # into the name if add_model_numbers: for i in xrange( len( details ) ): details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] # make text for display/writing # optionally display the model summay...because of how this is setup... if display: text = '' for i in xrange( len( details ) ): temp = '='*80 print temp text += temp +'\n' text += display_modbase_model_details( details[i] , export = True ) +'\n' print # clearner... # optionally return the details and a text summary...hacky... if export: return details , text return details # messy...based on default naming... def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ): """ Returns the "expected" model filenames from <out_directory> downloaded from ModBase ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # defaults for written files if not root_filename: root_filename = 'modbase_' + query if not out_directory: out_directory = './' # here! # ta da! return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb'] # ugh... def extract_modbase_model_details_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' , display = True ): """ Returns the model details of ModBase models for <query> in <out_directory> Optionally specify the <root_filename> from when the files were written Optionally <display> the details (for interactive use) ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the models models = determine_modbase_models_from_modbase_directory( query , out_directory , root_filename ) # :( hacky...but must be sure # rather than an if... 
out_directory = out_directory.strip( '/' ) +'/' # load the text model_text = ['']*len( models ) for i in xrange( len( models ) ): f = open( out_directory + models[i] , 'r' ) model_text[i] = f.read() f.close() # extract the details details = get_modbase_model_details( model_text , display ) # put the models into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] aln_filename = out_directory + models[i][:-4] + '_alignment.pir' if os.path.exists( aln_filename ): f = open( aln_filename , 'r' ) details[i]['alignment'] = f.read() f.close() return details ################################################################################ # MAIN if __name__ == '__main__': # parser object for managing input options parser = optparse.OptionParser() # essential data parser.add_option( '-q' , dest = 'query' , default = '' , help = 'the (UniProt) ID to download from ModBase' ) parser.add_option( '-o' , dest = 'out_directory' , default = 'modbase_models' , help = 'the name of the directory to create, for storing the downloads' ) parser.add_option( '-r' , dest = 'root_filename' , default = '' , help = 'the \"root\" for naming the files, defaults to the query' ) parser.add_option( '-d' , dest = 'dataset' , default = '' , help = 'which ModBase dataset to download from' ) # boolean options, sry if this is confusing, default them all to True # so when you use them, you turn them off parser.add_option( '-a' , dest = 'get_alignment' , default = True , help = 'include the alignments per model/template? default=True' , action = 'store_false' ) parser.add_option( '-w' , dest = 'write_summary' , default = True , help = 'output the stdout summary of the models? default=True' , action = 'store_false' ) parser.add_option( '-v' , dest = 'display' , default = True , help = 'print out \"useful\" information while running? default=True' , action = 'store_false' ) (options,args) = parser.parse_args() # check inputs # no edits/modifications # kinda silly, but I do this as "my style", easy to modify cleanly query = options.query out_directory = options.out_directory root_filename = options.root_filename dataset = options.dataset get_alignment = options.get_alignment write_summary = options.write_summary display = options.display # choose the default method to run download_models_from_modbase( query , out_directory = out_directory , root_filename = root_filename , dataset = dataset , get_alignment = get_alignment , write_summary = write_summary , display = display )
details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() )
elif i[:7] == 'EVALUE:':
random_line_split
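The "best model" cascade above filters by sequence identity, then breaks ties by model score, then by target length, duplicating the max/filter pair three times. The same selection can be written as one loop over the comparison keys; filter_best is a hypothetical name, and the key order is exactly the order the script announces in its summary text.

def filter_best( details , keys = ( 'sequence identity' , 'model score' , 'target length' ) ):
    """ Return the models still tied as "best" after comparing <keys> in order """
    matches = details
    for key in keys:
        best = max( [i[key] for i in matches] )
        matches = [i for i in matches if i[key] == best]
        if len( matches ) == 1:
            # no tie left, so later keys cannot change the answer
            break
    return matches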
modbase.py
#!/usr/bin/env python # :noTabs=true: """ remotely download from ModBase internal method "download_models_from_modbase" is the real winner other methods are for reloading afterwards (if necessary) so...some quirks, there seem to be multiple "user levels" including a distinction between "public" and "academic"...this seems odd to me so for now just looking at the default (public?) interface... or academic is default? reference the modeller.py script for scoring info...sorta... or nope there may be other data that is extractable from ModBase...however this is a bit obfuscated, for now, just focus on downloading the models and alignments todo: rework: download, then extract! add in quality features... Author: Evan H. Baugh """ ################################################################################ # IMPORT # common modules import os import urllib2 from xml.dom.minidom import parse as xml_parse import shutil import optparse # bigger modules # custom modules #from helper import create_directory , copy_file # place these simple helper scripts here instead, stand alone! ################################################################################ # HELPER METHODS # not very interesting, just ignore these # here because I like defining my own ways for these utilities which are in # another module...but you only care about ModBase stuff right? so # just put a copy here, that simple... # helper for creating a directory, checks and delets existing name def create_directory( dir_name , tagline = ' to sort the data' ): """ Creates the directory <dir_name> WARNING: this will delete the directory and its contents if it already exists! Optionally output something special in <tagline> """ # check if it exists print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline if os.path.isdir( dir_name ): print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...' shutil.rmtree( dir_name ) os.mkdir( dir_name ) # copy helper def copy_file( filename , destination , display = False ): """ Copy <filename> to/into <destination> just a cp wrapper...what? """ if display: # optional if os.path.isdir( destination ): print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory' elif os.path.isfile( destination ): print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination ) shutil.copy( filename , destination ) ################################################################################ # METHODS # woohoo! def download_models_from_modbase( query , out_directory = 'modbase_models' , root_filename = '' , dataset = '' , get_alignment = True , write_summary = True , display = True ): """ REQUIRES INTERNET CONNECTION Returns "details" on the models for <query> in ModBase write results to <out_directory> with the base <root_filename> Optionally request models from a specific <dataset> Optionally <get_alingment> too (as PIR file) Optionally <display> a summary of the results Optionally <write_summary> of the models (human readable, also displays) ModBase documentation claims that the interface can accept: databaseID database ID, let's use UniProt dataset a particular ModBase run? modelID same? seqID same? dataset the ModWeb JobID... type "model" or "alignment", this method handles this and that any of the first 4 is enough to identify the target (?) 
...for simplicity, let's just look using UniProt IDs as "databaseIDs" apparently to use "non-public" access additional work must be done (something about a "cookies.txt" file, though this seems specific to "wget", may be able to pass in user/password as "modbase_user" and "modbase_passwd") uses xml.dom.minidom to parse the HTML returned...this may not be kosher... but it works...and is easier than using htmllib or sgmllib...(?) """ # url url = 'http://salilab.org/modbase/retrieve/modbase' # format the search query print 'searching modbase for \"' + query +'\"' url += '?databaseID=' + query # currently unused...so why put it here? #for i in search_options.keys(): # url += '&' + i +'='+ search_options[i] # the dataset # if not 'dataset' in search_options.keys() and dataset: if dataset: url += '&dataset=' + dataset # go get the results print 'obtaining model results from:\n\t' + url raw_stream = urllib2.urlopen( url + '&type=model' ) print 'finished downloading models, summarizing the results...' # parse the results results = xml_parse( raw_stream ) # check if empty if not len( results.toxml() ) > 100: # ahhh! I hate arbitrary numbers!!! print 'no models exist in ModBase for this protein...' return {} # get the ids #ids = get_str_from_xml_tag( results , 'model_id' ) # no need, in the header of the model # get the models models = get_str_from_xml_tag( results , 'content' ) # extract the details details , text = get_modbase_model_details( models , display or write_summary , export = True ) # defaults for writing files if not root_filename: root_filename = 'modbase_' + query # optionally write the models if out_directory: create_directory( out_directory , ' to store the models as PDB files' ) print 'writing the downloaded models to ' + out_directory count = 1 filenames = [] for i in models: # write it filename = out_directory + '/' + root_filename + '_model_' + str( count ) + '.pdb' filenames.append( os.path.abspath( filename ) ) # write the alignment f = open( filename , 'w' ) f.write( i.strip() ) f.close() count += 1 # change this in this case models = filenames # SOOO HACKY!!!! # for later safety... out_directory += '/' # optionally grab the alignment too if get_alignment: print 'also downloading the alignments...' raw_aln_stream = urllib2.urlopen( url + '&type=alignment' ) # parse the results aln_results = xml_parse( raw_aln_stream ) # get the files aln_results = aln_results.getElementsByTagName( 'alignmentfile' ) # ...for now, just get the text itself # don't worry about the other details in the XML file print 'writing the alignments as PIR files...' count = 1 for i in aln_results: i = get_str_from_xml_tag( i , 'content' )[0] # just 1, always the first # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_model_' + str( count ) + '_alignment.pir' f = open( filename , 'w' ) f.write( i ) f.close() # convert them? # doesn't seem to load these "pir" files...? :( # save in the details? details[count - 1]['alignment'] = i count += 1 # put the models (filenames) into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] # find the "best" model temp = '\nevaluating the \"best\" model by comparing:\n\t1. sequence identity\n\t2. model score\n\t3. 
target length' print temp text += temp +'\n' best_score = max( [i['sequence identity'] for i in details] ) matches = [i for i in details if i['sequence identity'] == best_score] if len( matches ) > 1 and sum( [not i['model score'] == matches[0]['model score'] for i in matches[1:]] ): # find the best model score best_score = max( [i['model score'] for i in details] ) matches = [i for i in details if i['model score'] == best_score] if len( matches ) > 1 and sum( [not i['target length'] == matches[0]['target length'] for i in matches[1:]] ): best_score = max( [i['target length'] for i in details] ) matches = [i for i in details if i['target length'] == best_score] # debug output if len( matches ) > 1: temp = 'multiple models are \"equally the best\":' print temp text += temp +'\n' for i in matches: temp = '\t'+ i['coordinates'] print temp text += temp +'\n' temp = 'copying the first on to best_model.pdb' print temp text += temp +'\n' else: temp = 'best model: ' + matches[0]['coordinates'] print temp text += temp # move it to a indicative filename copy_file( matches[0]['coordinates'] , out_directory + '/best_model.pdb' ) # optionally write a summary file if write_summary: # if out_directory is empty...this will just do as we want filename = out_directory + root_filename + '_summary.txt' f = open( filename , 'w' ) f.write( text ) f.close() # just the details, has everything else... return details # very hacky wrapper def get_str_from_xml_tag( xml_object , tag ): """ So...I don't have time to learn proper XML parsing with the Python "xml" library right now and this approach works...so yeah simply return a list of str for the target <tag> in <xml_object> """ # get it results = xml_object.getElementsByTagName( tag ) # convert to string L = len( tag ) results = [i.toxml()[L + 2:-(L + 3)].strip() for i in results] return results # useful simple text parsing def extract_model_details_from_modbase_header( modbase_model_text ): """ Returns a dict of the model details from <modbase_model_text> this includes the PDB template, coverage details (always continuous), and alignment/modeling details """ # setup defaults, cleaner display details = { 'model' : '?' , 'organism' : '?' , 'experiment' : '?' , 'method' : '?' , 'program' : '?' , 'sequence identity' : 0 , 'model score' : 0 , 'evalue' : 0 , 'template' : '?' , 'template chain' : '?' , 'template coverage' : [] , 'target length record' : 0 , 'target coverage' : [] , 'template length' : 0 , 'target length' : 0 , 'ModPipe run' : '?' , 'modelID' : '?' , 'alignmentID' : '?' } # over the lines for i in modbase_model_text.split( '\n' ): if i[:4] == 'ATOM': # done! end of the header break elif i[:6] == 'HEADER': details['model'] = str( i.replace( 'HEADER' , '' ).strip() ) #elif i[:5] == 'TITLE': # ...uh, the ones I looked at, this was useless... elif i[:6] == 'SOURCE': details['organism'] = str( i.replace( 'SOURCE' , '' ).strip() ) #elif i[:6] == 'AUTHOR': # don't care about authors for now... elif i[:10] == 'REMARK 220': i = str( i.replace( 'REMARK 220' , '' ).strip() ) # keep sorting... 
if i[:16] == 'EXPERIMENT TYPE:': details['experiment'] = str( i.replace( 'EXPERIMENT TYPE:' , '' ).strip() ).capitalize() elif i[:7] == 'METHOD:': details['method'] = str( i.replace( 'METHOD:' , '' ).strip() ).capitalize() elif i[:8] == 'PROGRAM:': details['program'] = str( i.replace( 'PROGRAM:' , '' ).strip() ) elif i[:18] == 'SEQUENCE IDENTITY:': # as fraction please details['sequence identity'] = float( i.replace( 'SEQUENCE IDENTITY:' , '' ).strip() )/100 elif i[:12] == 'MODEL SCORE:': # as float details['model score'] = float( i.replace( 'MODEL SCORE:' , '' ).strip() ) elif i[:7] == 'EVALUE:': # as float details['evalue'] = float( i.replace( 'EVALUE:' , '' ).strip() ) elif i[:13] == 'TEMPLATE PDB:': details['template'] = str( i.replace( 'TEMPLATE PDB:' , '' ).strip().upper() ) elif i[:15] == 'TEMPLATE CHAIN:': details['template chain'] = str( i.replace( 'TEMPLATE CHAIN:' , '' ).strip() ) elif i[:15] == 'TEMPLATE BEGIN:': details['template coverage'].append( int( i.replace( 'TEMPLATE BEGIN:' , '' ).strip() ) ) elif i[:13] == 'TEMPLATE END:': details['template coverage'].append( int( i.replace( 'TEMPLATE END:' , '' ).strip() ) ) elif i[:14] == 'TARGET LENGTH:': details['target length record'] = int( i.replace( 'TARGET LENGTH:' , '' ).strip() ) elif i[:13] == 'TARGET BEGIN:': details['target coverage'].append( int( i.replace( 'TARGET BEGIN:' , '' ).strip() ) ) elif i[:11] == 'TARGET END:': details['target coverage'].append( int( i.replace( 'TARGET END:' , '' ).strip() ) ) elif i[:12] == 'MODPIPE RUN:': details['ModPipe run'] = str( i.replace( 'MODPIPE RUN:' , '' ).strip() ) elif i[:17] == 'MODPIPE MODEL ID:': details['modelID'] = str( i.replace( 'MODPIPE MODEL ID:' , '' ).strip() ) elif i[:21] == 'MODPIPE ALIGNMENT ID:': details['alignmentID'] = str( i.replace( 'MODPIPE ALIGNMENT ID:' , '' ).strip() ) # for own sanity details['template coverage'].sort() details['template length'] = details['template coverage'][1] - details['template coverage'][0] + 1 details['target coverage'].sort() details['target length'] = details['target coverage'][1] - details['target coverage'][0] + 1 return details # silly interactive method def display_modbase_model_details( details , include_run_details = False , export = False ):
# simple wrapper def get_modbase_model_details( models , add_model_numbers = True , display = True , export = False ): """ Returns the details of the model text <models> Optionally <display> the details Optionally <export> the details AND a summary str ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the details # add hacky error check here... permissive_models = [] for i in models: try: model = extract_model_details_from_modbase_header( i ) permissive_models.append( model ) except: print 'HEY! very rare bug found! ModBase model based on icode regions...not today friends...' permissive_models.append( { 'FAIL' : 'model indices contain and icode...this makes re-aligning VERY complicated...' , 'model' : '???' , 'sequence identity' : 0 , 'model score' : 0 , 'target length' : 0 , # more hacking...ugh 'target coverage' : [0 , 0] } ) #continue details = permissive_models # optionally add the model # into the name if add_model_numbers: for i in xrange( len( details ) ): details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] # make text for display/writing # optionally display the model summay...because of how this is setup... if display: text = '' for i in xrange( len( details ) ): temp = '='*80 print temp text += temp +'\n' text += display_modbase_model_details( details[i] , export = True ) +'\n' print # clearner... # optionally return the details and a text summary...hacky... if export: return details , text return details # messy...based on default naming... def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ): """ Returns the "expected" model filenames from <out_directory> downloaded from ModBase ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # defaults for written files if not root_filename: root_filename = 'modbase_' + query if not out_directory: out_directory = './' # here! # ta da! return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb'] # ugh... def extract_modbase_model_details_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' , display = True ): """ Returns the model details of ModBase models for <query> in <out_directory> Optionally specify the <root_filename> from when the files were written Optionally <display> the details (for interactive use) ...this is quite messy, but if the interpreter closes, the data must be extracted from the downloaded files, hence replicate the naming conventions used... """ # get the models models = determine_modbase_models_from_modbase_directory( query , out_directory , root_filename ) # :( hacky...but must be sure # rather than an if... 
out_directory = out_directory.strip( '/' ) +'/' # load the text model_text = ['']*len( models ) for i in xrange( len( models ) ): f = open( out_directory + models[i] , 'r' ) model_text[i] = f.read() f.close() # extract the details details = get_modbase_model_details( model_text , display ) # put the models into details...cleaner output, just 1 dict for i in xrange( len( models ) ): details[i]['coordinates'] = models[i] details[i]['model'] = '#' + str( i + 1 ) +', '+ details[i]['model'] aln_filename = out_directory + models[i][:-4] + '_alignment.pir' if os.path.exists( aln_filename ): f = open( aln_filename , 'r' ) details[i]['alignment'] = f.read() f.close() return details ################################################################################ # MAIN if __name__ == '__main__': # parser object for managing input options parser = optparse.OptionParser() # essential data parser.add_option( '-q' , dest = 'query' , default = '' , help = 'the (UniProt) ID to download from ModBase' ) parser.add_option( '-o' , dest = 'out_directory' , default = 'modbase_models' , help = 'the name of the directory to create, for storing the downloads' ) parser.add_option( '-r' , dest = 'root_filename' , default = '' , help = 'the \"root\" for naming the files, defaults to the query' ) parser.add_option( '-d' , dest = 'dataset' , default = '' , help = 'which ModBase dataset to download from' ) # boolean options, sry if this is confusing, default them all to True # so when you use them, you turn them off parser.add_option( '-a' , dest = 'get_alignment' , default = True , help = 'include the alignments per model/template? default=True' , action = 'store_false' ) parser.add_option( '-w' , dest = 'write_summary' , default = True , help = 'output the stdout summary of the models? default=True' , action = 'store_false' ) parser.add_option( '-v' , dest = 'display' , default = True , help = 'print out \"useful\" information while running? default=True' , action = 'store_false' ) (options,args) = parser.parse_args() # check inputs # no edits/modifications # kinda silly, but I do this as "my style", easy to modify cleanly query = options.query out_directory = options.out_directory root_filename = options.root_filename dataset = options.dataset get_alignment = options.get_alignment write_summary = options.write_summary display = options.display # choose the default method to run download_models_from_modbase( query , out_directory = out_directory , root_filename = root_filename , dataset = dataset , get_alignment = get_alignment , write_summary = write_summary , display = display )
""" Displays a summary of the ModBase model <details> Optionally <include_run_details> Optionally <export> the summary text """ # check the input if isinstance( details , str ): # assume it just needs to be parsed out details = extract_model_details_from_modbase_header( details ) # exit condition if 'FAIL' in details.keys(): text = details['model'] +'\n' text += 'FAILED: ' + details['FAIL'] +'\n' print text[:-1] return text[:-1] # make the string text = details['model'] +'\n' text += 'covering: ' + str( details['target coverage'][0] ) +'-'+ str( details['target coverage'][1] ) +' ('+ str( details['target length'] ) + ' positions)\n' text += '\nfrom: ' + details['template'] +' '+ details['template chain'] +' (' + str( details['template coverage'][0] ) +'-'+ str( details['template coverage'][1] ) +') from ' + details['organism'] +'\n' text += '\nsequence identity: ' + str( details['sequence identity'] )[:6] + ' (evalue ' + str( details['evalue'] )[:6] +')\n' text += 'model score: ' + str( details['model score'] )[:6] +'\n' # optionally include the run details if include_run_details: text += '\n' text += details['experiment'] + ' by ' + details['method'] + ' using ' + details['program'] +'\n' text += 'from: ' + details['ModPipe run'] +'\n' print text[:-1] # optionally return the text if export: return text[:-1]
identifier_body
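The helpers above chain together: determine the expected filenames, read the model text, and parse the headers. A minimal usage sketch follows, assuming the functions above (plus the os/optparse imports and download_models_from_modbase defined earlier in the script) are importable; the query ID 'P05141' is a made-up placeholder, not taken from the source.

# Hedged usage sketch (not part of the original script): download models for
# a placeholder UniProt ID, then re-extract details from the files on disk,
# as one would after restarting the interpreter.
query = 'P05141'    # placeholder ID for illustration only

# download the models and alignments into ./modbase_models
download_models_from_modbase( query , out_directory = 'modbase_models' )

# later (e.g. a new session): recover the details from the written files
details = extract_modbase_model_details_from_modbase_directory( query ,
    out_directory = 'modbase_models' , display = False )

# summarize the first model; export = True also returns the summary text
summary = display_modbase_model_details( details[0] , export = True )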
dir.go
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package inode

import (
	"fmt"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/simonwahlstrom/gcsfuse/internal/gcsx"
	"github.com/jacobsa/fuse/fuseops"
	"github.com/jacobsa/fuse/fuseutil"
	"github.com/jacobsa/gcloud/gcs"
	"github.com/jacobsa/syncutil"
	"github.com/jacobsa/timeutil"
	"golang.org/x/net/context"
)

// IsDirName reports whether the supplied object name represents a directory
// (as opposed to a file or symlink).
func IsDirName(name string) bool {
	return name == "" || name[len(name)-1] == '/'
}

// The result of looking up a child within a directory inode. See notes on
// DirInode.LookUpChild for more info.
type LookUpResult struct {
	// The GCS bucket where the lookup is performed.
	Bucket gcsx.SyncerBucket

	// For both object-backed children and implicit directories, the full
	// canonical name of the child. For example, if the parent inode is "foo/"
	// and the child is a directory, then this is "foo/bar/".
	//
	// Guaranteed to be present only if Exists().
	FullName Name

	// The backing object for the child, if any. If the child is not found or
	// exists only as an implicit directory, this is nil.
	Object *gcs.Object

	// Does the child exist as a directory implicitly defined by its own
	// descendants? Meaningful only if Object is nil and implicit directories
	// are enabled for the parent inode.
	ImplicitDir bool
}

// Exists returns true iff the result indicates that the child exists,
// explicitly or implicitly.
func (lr *LookUpResult) Exists() bool {
	return lr.Object != nil || lr.ImplicitDir
}

// An inode representing a directory, with facilities for listing entries,
// looking up children, and creating and deleting children. Must be locked for
// any method additional to the Inode interface.
type DirInode interface {
	BucketOwnedInode

	// Look up the direct child with the given relative name, returning
	// information about the object backing the child or whether it exists as
	// an implicit directory. If a file/symlink and a directory with the given
	// name both exist, the directory is preferred. Return a result with
	// !result.Exists() and a nil error if neither is found.
	//
	// Special case: if the name ends in ConflictingFileNameSuffix, we strip
	// the suffix, confirm that a conflicting directory exists, then return a
	// result for the file/symlink.
	//
	// If this inode was created with implicitDirs set, this method will use
	// ListObjects to find child directories that are "implicitly" defined by
	// the existence of their own descendants. For example, if there is an
	// object named "foo/bar/baz" and this is the directory "foo", a child
	// directory named "bar" will be implied. In this case, result.ImplicitDir
	// will be true.
	LookUpChild(
		ctx context.Context,
		name string) (result LookUpResult, err error)

	// Read some number of entries from the directory, returning a
	// continuation token that can be used to pick up the read operation where
	// it left off. Supply the empty token on the first call.
	//
	// At the end of the directory, the returned continuation token will be
	// empty. Otherwise it will be non-empty. There is no guarantee about the
	// number of entries returned; it may be zero even with a non-empty
	// continuation token.
	//
	// The contents of the Offset and Inode fields for returned entries are
	// undefined.
	ReadEntries(
		ctx context.Context,
		tok string) (entries []fuseutil.Dirent, newTok string, err error)

	// Create an empty child file with the supplied (relative) name, failing
	// with *gcs.PreconditionError if a backing object already exists in GCS.
	// Return the full name of the child and the GCS object backing it.
	CreateChildFile(
		ctx context.Context,
		name string) (fn Name, o *gcs.Object, err error)

	// Like CreateChildFile, except clone the supplied source object instead
	// of creating an empty object.
	// Return the full name of the child and the GCS object backing it.
	CloneToChildFile(
		ctx context.Context,
		name string,
		src *gcs.Object) (fn Name, o *gcs.Object, err error)

	// Create a symlink object with the supplied (relative) name and the
	// supplied target, failing with *gcs.PreconditionError if a backing
	// object already exists in GCS.
	// Return the full name of the child and the GCS object backing it.
	CreateChildSymlink(
		ctx context.Context,
		name string,
		target string) (fn Name, o *gcs.Object, err error)

	// Create a backing object for a child directory with the supplied
	// (relative) name, failing with *gcs.PreconditionError if a backing
	// object already exists in GCS.
	// Return the full name of the child and the GCS object backing it.
	CreateChildDir(
		ctx context.Context,
		name string) (fn Name, o *gcs.Object, err error)

	// Delete the backing object for the child file or symlink with the given
	// (relative) name and generation number, where zero means the latest
	// generation. If the object/generation doesn't exist, no error is
	// returned.
	//
	// metaGeneration may be set to a non-nil pointer giving a meta-generation
	// precondition, but need not be.
	DeleteChildFile(
		ctx context.Context,
		name string,
		generation int64,
		metaGeneration *int64) (err error)

	// Delete the backing object for the child directory with the given
	// (relative) name.
	DeleteChildDir(
		ctx context.Context,
		name string) (err error)
}

type dirInode struct {
	/////////////////////////
	// Dependencies
	/////////////////////////

	bucket     gcsx.SyncerBucket
	mtimeClock timeutil.Clock
	cacheClock timeutil.Clock

	/////////////////////////
	// Constant data
	/////////////////////////

	id           fuseops.InodeID
	implicitDirs bool

	// INVARIANT: name.IsDir()
	name Name

	attrs fuseops.InodeAttributes

	/////////////////////////
	// Mutable state
	/////////////////////////

	// A mutex that must be held when calling certain methods. See
	// documentation for each method.
	mu syncutil.InvariantMutex

	// GUARDED_BY(mu)
	lc lookupCount

	// cache.CheckInvariants() does not panic.
	//
	// GUARDED_BY(mu)
	cache typeCache
}

var _ DirInode = &dirInode{}

// Create a directory inode for the name, representing the directory
// containing the objects for which it is an immediate prefix. For the root
// directory, this is the empty string.
//
// If implicitDirs is set, LookUpChild will use ListObjects to find child
// directories that are "implicitly" defined by the existence of their own
// descendants. For example, if there is an object named "foo/bar/baz" and
// this is the directory "foo", a child directory named "bar" will be implied.
//
// If typeCacheTTL is non-zero, a cache from child name to information about
// whether that name exists as a file/symlink and/or directory will be
// maintained. This may speed up calls to LookUpChild, especially when
// combined with a stat-caching GCS bucket, but comes at the cost of
// consistency: if the child is removed and recreated with a different type
// before the expiration, we may fail to find it.
//
// The initial lookup count is zero.
//
// REQUIRES: IsDirName(name)
func NewDirInode(
	id fuseops.InodeID,
	name Name,
	attrs fuseops.InodeAttributes,
	implicitDirs bool,
	typeCacheTTL time.Duration,
	bucket gcsx.SyncerBucket,
	mtimeClock timeutil.Clock,
	cacheClock timeutil.Clock) (d DirInode) {
	if !name.IsDir() {
		panic(fmt.Sprintf("Unexpected name: %s", name))
	}

	// Set up the struct.
	const typeCacheCapacity = 1 << 16
	typed := &dirInode{
		bucket:       bucket,
		mtimeClock:   mtimeClock,
		cacheClock:   cacheClock,
		id:           id,
		implicitDirs: implicitDirs,
		name:         name,
		attrs:        attrs,
		cache:        newTypeCache(typeCacheCapacity/2, typeCacheTTL),
	}
	typed.lc.Init(id)

	// Set up invariant checking.
	typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants)

	d = typed
	return
}

////////////////////////////////////////////////////////////////////////
// Helpers
////////////////////////////////////////////////////////////////////////

func (d *dirInode) checkInvariants() {
	// INVARIANT: d.name.IsDir()
	if !d.name.IsDir() {
		panic(fmt.Sprintf("Unexpected name: %s", d.name))
	}

	// cache.CheckInvariants() does not panic.
	d.cache.CheckInvariants()
}

func (d *dirInode) lookUpChildFile(
	ctx context.Context,
	name string) (result LookUpResult, err error) {
	result.Bucket = d.Bucket()
	result.FullName = NewFileName(d.Name(), name)
	result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName)
	if err != nil {
		err = fmt.Errorf("statObjectMayNotExist: %v", err)
		return
	}

	return
}

func (d *dirInode) lookUpChildDir(
	ctx context.Context,
	dirName string) (result LookUpResult, err error) {
	b := syncutil.NewBundle(ctx)
	childName := NewDirName(d.Name(), dirName)

	// Stat the placeholder object.
	b.Add(func(ctx context.Context) (err error) {
		result.Bucket = d.Bucket()
		result.FullName = childName
		result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName)
		if err != nil {
			err = fmt.Errorf("statObjectMayNotExist: %v", err)
			return
		}

		return
	})

	// If implicit directories are enabled, find out whether the child name is
	// implicitly defined.
	if d.implicitDirs {
		b.Add(func(ctx context.Context) (err error) {
			result.ImplicitDir, err = objectNamePrefixNonEmpty(
				ctx,
				d.bucket,
				childName.GcsObjectName())
			if err != nil {
				err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err)
				return
			}

			return
		})
	}

	// Wait for both.
	err = b.Join()
	if err != nil {
		return
	}

	return
}

// Look up the file for a (file, dir) pair with conflicting names, overriding
// the default behavior. If the file doesn't exist, return a nil record with a
// nil error. If the directory doesn't exist, pretend the file doesn't exist.
//
// REQUIRES: strings.HasSuffix(name, ConflictingFileNameSuffix)
func (d *dirInode) lookUpConflicting(
	ctx context.Context,
	name string) (result LookUpResult, err error) {
	strippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix)

	// In order for a marked name to be accepted, we require the conflicting
	// directory to exist.
	var dirResult LookUpResult
	dirResult, err = d.lookUpChildDir(ctx, strippedName)
	if err != nil {
		err = fmt.Errorf("lookUpChildDir for stripped name: %v", err)
		return
	}

	if !dirResult.Exists() {
		return
	}

	// The directory name exists. Find the conflicting file.
	result, err = d.lookUpChildFile(ctx, strippedName)
	if err != nil {
		err = fmt.Errorf("lookUpChildFile for stripped name: %v", err)
		return
	}

	return
}

// List the supplied object name prefix to find out whether it is non-empty.
func objectNamePrefixNonEmpty(
	ctx context.Context,
	bucket gcs.Bucket,
	prefix string) (nonEmpty bool, err error) {
	req := &gcs.ListObjectsRequest{
		Prefix:     prefix,
		MaxResults: 1,
	}

	listing, err := bucket.ListObjects(ctx, req)
	if err != nil {
		err = fmt.Errorf("ListObjects: %v", err)
		return
	}

	nonEmpty = len(listing.Objects) != 0
	return
}

// Stat the object with the given name, returning (nil, nil) if the object
// doesn't exist rather than failing.
func statObjectMayNotExist(
	ctx context.Context,
	bucket gcs.Bucket,
	name Name) (o *gcs.Object, err error) {
	// Call the bucket.
	req := &gcs.StatObjectRequest{
		Name: name.GcsObjectName(),
	}

	o, err = bucket.StatObject(ctx, req)

	// Suppress "not found" errors.
	if _, ok := err.(*gcs.NotFoundError); ok {
		err = nil
	}

	// Annotate others.
	if err != nil {
		err = fmt.Errorf("StatObject: %v", err)
		return
	}

	return
}

// Fail if the name already exists. Pass on errors directly.
func (d *dirInode) createNewObject(
	ctx context.Context,
	name Name,
	metadata map[string]string) (o *gcs.Object, err error) {
	// Create an empty backing object for the child, failing if it already
	// exists.
	var precond int64
	createReq := &gcs.CreateObjectRequest{
		Name:                   name.GcsObjectName(),
		Contents:               strings.NewReader(""),
		GenerationPrecondition: &precond,
		Metadata:               metadata,
	}

	o, err = d.bucket.CreateObject(ctx, createReq)
	if err != nil {
		return
	}

	return
}

// An implementation detail of filterMissingChildDirs.
func filterMissingChildDirNames(
	ctx context.Context,
	bucket gcs.Bucket,
	dirName Name,
	unfiltered <-chan string,
	filtered chan<- string) (err error) {
	for name := range unfiltered {
		var o *gcs.Object

		// Stat the placeholder.
		o, err = statObjectMayNotExist(
			ctx,
			bucket,
			NewDirName(dirName, name),
		)
		if err != nil {
			err = fmt.Errorf("statObjectMayNotExist: %v", err)
			return
		}

		// Should we pass on this name?
		if o == nil {
			continue
		}

		select {
		case <-ctx.Done():
			err = ctx.Err()
			return

		case filtered <- name:
		}
	}

	return
}

// Given a list of child names that appear to be directories according to
// d.bucket.ListObjects (which always behaves as if implicit directories are
// enabled), filter out the ones for which a placeholder object does not
// actually exist. If implicit directories are enabled, simply return them
// all.
//
// LOCKS_REQUIRED(d)
func (d *dirInode) filterMissingChildDirs(
	ctx context.Context,
	in []string) (out []string, err error) {
	// Do we need to do anything?
	if d.implicitDirs {
		out = in
		return
	}

	b := syncutil.NewBundle(ctx)

	// First add any names that we already know are directories according to
	// our cache, removing them from the input.
	now := d.cacheClock.Now()

	var tmp []string
	for _, name := range in {
		if d.cache.IsDir(now, name) {
			out = append(out, name)
		} else {
			tmp = append(tmp, name)
		}
	}

	in = tmp

	// Feed names into a channel.
unfiltered := make(chan string, 100) b.Add(func(ctx context.Context) (err error) { defer close(unfiltered) for _, name := range in { select { case <-ctx.Done(): err = ctx.Err() return case unfiltered <- name: } } return }) // Stat the placeholder object for each, filtering out placeholders that are // not found. Use some parallelism. const statWorkers = 32 filtered := make(chan string, 100) var wg sync.WaitGroup for i := 0; i < statWorkers; i++ { wg.Add(1) b.Add(func(ctx context.Context) (err error) { defer wg.Done() err = filterMissingChildDirNames( ctx, d.bucket, d.Name(), unfiltered, filtered) return }) } go func() { wg.Wait() close(filtered) }() // Accumulate into a slice. var filteredSlice []string b.Add(func(ctx context.Context) (err error) { for name := range filtered { filteredSlice = append(filteredSlice, name) } return }) // Wait for everything to complete. err = b.Join() // Update the cache with everything we learned. now = d.cacheClock.Now() for _, name := range filteredSlice { d.cache.NoteDir(now, name) } // Return everything we learned. out = append(out, filteredSlice...) return } //////////////////////////////////////////////////////////////////////// // Public interface //////////////////////////////////////////////////////////////////////// func (d *dirInode) Lock() { d.mu.Lock() } func (d *dirInode) Unlock() { d.mu.Unlock() } func (d *dirInode) ID() fuseops.InodeID { return d.id } func (d *dirInode) Name() Name { return d.name } // LOCKS_REQUIRED(d) func (d *dirInode) IncrementLookupCount() { d.lc.Inc() } // LOCKS_REQUIRED(d) func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) { destroy = d.lc.Dec(n) return } // LOCKS_REQUIRED(d) func (d *dirInode) Destroy() (err error) { // Nothing interesting to do. return } // LOCKS_REQUIRED(d) func (d *dirInode) Attributes( ctx context.Context) (attrs fuseops.InodeAttributes, err error) { // Set up basic attributes. attrs = d.attrs attrs.Nlink = 1 return } func (d *dirInode) Bucket() gcsx.SyncerBucket { return d.bucket } // A suffix that can be used to unambiguously tag a file system name. // (Unambiguous because U+000A is not allowed in GCS object names.) This is // used to refer to the file/symlink in a (file/symlink, directory) pair with // conflicting object names. // // See also the notes on DirInode.LookUpChild. const ConflictingFileNameSuffix = "\n" // LOCKS_REQUIRED(d) func (d *dirInode) LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) { // Consult the cache about the type of the child. This may save us work // below. now := d.cacheClock.Now() cacheSaysFile := d.cache.IsFile(now, name) cacheSaysDir := d.cache.IsDir(now, name) // Is this a conflict marker name? if strings.HasSuffix(name, ConflictingFileNameSuffix) { result, err = d.lookUpConflicting(ctx, name) return } // Stat the child as a file, unless the cache has told us it's a directory // but not a file. b := syncutil.NewBundle(ctx) var fileResult LookUpResult if !(cacheSaysDir && !cacheSaysFile) { b.Add(func(ctx context.Context) (err error) { fileResult, err = d.lookUpChildFile(ctx, name) return }) } // Stat the child as a directory, unless the cache has told us it's a file // but not a directory. var dirResult LookUpResult if !(cacheSaysFile && !cacheSaysDir) { b.Add(func(ctx context.Context) (err error) { dirResult, err = d.lookUpChildDir(ctx, name) return }) } // Wait for both. err = b.Join() if err != nil { return } // Prefer directories over files. 
	switch {
	case dirResult.Exists():
		result = dirResult

	case fileResult.Exists():
		result = fileResult
	}

	// Update the cache.
	now = d.cacheClock.Now()
	if fileResult.Exists() {
		d.cache.NoteFile(now, name)
	}

	if dirResult.Exists() {
		d.cache.NoteDir(now, name)
	}

	return
}

// LOCKS_REQUIRED(d)
func (d *dirInode) ReadEntries(
	ctx context.Context,
	tok string) (entries []fuseutil.Dirent, newTok string, err error) {
	// Ask the bucket to list some objects.
	req := &gcs.ListObjectsRequest{
		Delimiter:         "/",
		Prefix:            d.Name().GcsObjectName(),
		ContinuationToken: tok,
	}

	listing, err := d.bucket.ListObjects(ctx, req)
	if err != nil {
		err = fmt.Errorf("ListObjects: %v", err)
		return
	}

	// Convert objects to entries for files or symlinks.
	for _, o := range listing.Objects {
		// Skip the entry for the backing object itself, which of course has
		// its own name as a prefix but which we don't want to appear to
		// contain itself.
		if o.Name == d.Name().GcsObjectName() {
			continue
		}

		e := fuseutil.Dirent{
			Name: path.Base(o.Name),
			Type: fuseutil.DT_File,
		}

		if IsSymlink(o) {
			e.Type = fuseutil.DT_Link
		}

		entries = append(entries, e)
	}

	// Extract directory names from the collapsed runs.
	var dirNames []string
	for _, p := range listing.CollapsedRuns {
		dirNames = append(dirNames, path.Base(p))
	}

	// Filter the directory names according to our implicit directory
	// settings.
	dirNames, err = d.filterMissingChildDirs(ctx, dirNames)
	if err != nil {
		err = fmt.Errorf("filterMissingChildDirs: %v", err)
		return
	}

	// Return entries for directories.
	for _, name := range dirNames {
		e := fuseutil.Dirent{
			Name: name,
			Type: fuseutil.DT_Directory,
		}

		entries = append(entries, e)
	}

	// Return an appropriate continuation token, if any.
	newTok = listing.ContinuationToken

	// Update the type cache with everything we learned.
	now := d.cacheClock.Now()
	for _, e := range entries {
		switch e.Type {
		case fuseutil.DT_File:
			d.cache.NoteFile(now, e.Name)

		case fuseutil.DT_Directory:
			d.cache.NoteDir(now, e.Name)
		}
	}

	return
}

// LOCKS_REQUIRED(d)
func (d *dirInode) CreateChildFile(
	ctx context.Context,
	name string) (fn Name, o *gcs.Object, err error) {
	metadata := map[string]string{
		FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano),
	}

	fn = NewFileName(d.Name(), name)
	o, err = d.createNewObject(ctx, fn, metadata)
	if err != nil {
		return
	}

	d.cache.NoteFile(d.cacheClock.Now(), name)

	return
}

// LOCKS_REQUIRED(d)
func (d *dirInode) CloneToChildFile(
	ctx context.Context,
	name string,
	src *gcs.Object) (fn Name, o *gcs.Object, err error) {
	// Erase any existing type information for this name.
	d.cache.Erase(name)
	fn = NewFileName(d.Name(), name)

	// Clone over anything that might already exist for the name.
	o, err = d.bucket.CopyObject(
		ctx,
		&gcs.CopyObjectRequest{
			SrcName:                       src.Name,
			SrcGeneration:                 src.Generation,
			SrcMetaGenerationPrecondition: &src.MetaGeneration,
			DstName:                       fn.GcsObjectName(),
		})
	if err != nil {
		return
	}

	// Update the type cache.
d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) { fn = NewFileName(d.Name(), name) metadata := map[string]string{ SymlinkMetadataKey: target, } o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { fn = NewDirName(d.Name(), name) o, err = d.createNewObject(ctx, fn, nil) if err != nil { return } d.cache.NoteDir(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) { d.cache.Erase(name) childName := NewFileName(d.Name(), name) err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), Generation: generation, MetaGenerationPrecondition: metaGeneration, }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildDir( ctx context.Context, name string) (err error) { d.cache.Erase(name) childName := NewDirName(d.Name(), name) // Delete the backing object. Unfortunately we have no way to precondition // this on the directory being empty. err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), }) if err != nil {
}
err = fmt.Errorf("DeleteObject: %v", err) return } return
random_line_split
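filterMissingChildDirs above uses a standard Go fan-out pattern: feed candidate names into a channel, run a fixed pool of workers that each stat a name and forward the survivors, and close the output channel once a WaitGroup drains. Below is a self-contained sketch of just that pattern, with a stubbed existence check standing in for statObjectMayNotExist; the package name, worker count, and exists() stub are illustrative, not taken from gcsfuse.

// Standalone sketch of the fan-out filtering pattern used by
// filterMissingChildDirs: feed candidate names into a channel, filter them
// with a fixed pool of workers, and collect the survivors. The exists()
// stub stands in for a per-name stat against the backing store; everything
// here is illustrative, not part of gcsfuse.
package main

import (
	"fmt"
	"sync"
)

// exists is a stand-in for a per-name stat (e.g. a placeholder object check).
func exists(name string) bool {
	return name != "implicit-only/"
}

func filterExisting(in []string, workers int) (out []string) {
	// Feed all names, then close so workers terminate when drained.
	unfiltered := make(chan string, len(in))
	for _, name := range in {
		unfiltered <- name
	}
	close(unfiltered)

	// A fixed pool of workers stats names and forwards survivors.
	filtered := make(chan string, len(in))
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range unfiltered {
				if exists(name) {
					filtered <- name
				}
			}
		}()
	}

	// Close the output channel once every worker is done, then drain it.
	go func() {
		wg.Wait()
		close(filtered)
	}()
	for name := range filtered {
		out = append(out, name)
	}
	return
}

func main() {
	names := []string{"a/", "implicit-only/", "b/"}
	fmt.Println(filterExisting(names, 4)) // order is nondeterministic
}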
dir.go
// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package inode import ( "fmt" "path" "strings" "sync" "time" "github.com/simonwahlstrom/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/fuse/fuseutil" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/syncutil" "github.com/jacobsa/timeutil" "golang.org/x/net/context" ) // IsDirName Does the supplied object name represent a directory (as opposed to a file or // symlink)? func IsDirName(name string) bool { return name == "" || name[len(name)-1] == '/' } // The result of looking up a child within a directory inode. See notes on // DirInode.LookUpChild for more info. type LookUpResult struct { // The GCS bucket where the lookup is performed. Bucket gcsx.SyncerBucket // For both object-backed children and implicit directories, the full // canonical name of the child. For example, if the parent inode is "foo/" // and the child is a directory, then this is "foo/bar/". // // Guaranteed to be present only if Exists(). FullName Name // The backing object for the child, if any. If the child is not found or // exists only as an implicit directory, this is nil. Object *gcs.Object // Does the child exist as a directory implicitly defined by its own // descendents? Meaningful only if Object is nil and implicit directories are // enabled for the parent inode. ImplicitDir bool } // Exists returns true iff the result indicates that the child exists, explicitly or // implicitly. func (lr *LookUpResult) Exists() bool { return lr.Object != nil || lr.ImplicitDir } // An inode representing a directory, with facilities for listing entries, // looking up children, and creating and deleting children. Must be locked for // any method additional to the Inode interface. type DirInode interface { BucketOwnedInode // Look up the direct child with the given relative name, returning // information about the object backing the child or whether it exists as an // implicit directory. If a file/symlink and a directory with the given name // both exist, the directory is preferred. Return a result with // !result.Exists() and a nil error if neither is found. // // Special case: if the name ends in ConflictingFileNameSuffix, we strip the // suffix, confirm that a conflicting directory exists, then return a result // for the file/symlink. // // If this inode was created with implicitDirs is set, this method will use // ListObjects to find child directories that are "implicitly" defined by the // existence of their own descendents. For example, if there is an object // named "foo/bar/baz" and this is the directory "foo", a child directory // named "bar" will be implied. In this case, result.ImplicitDir will be // true. LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) // Read some number of entries from the directory, returning a continuation // token that can be used to pick up the read operation where it left off. // Supply the empty token on the first call. 
// // At the end of the directory, the returned continuation token will be // empty. Otherwise it will be non-empty. There is no guarantee about the // number of entries returned; it may be zero even with a non-empty // continuation token. // // The contents of the Offset and Inode fields for returned entries is // undefined. ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) // Create an empty child file with the supplied (relative) name, failing with // *gcs.PreconditionError if a backing object already exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Like CreateChildFile, except clone the supplied source object instead of // creating an empty object. // Return the full name of the child and the GCS object it backs up. CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) // Create a symlink object with the supplied (relative) name and the supplied // target, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) // Create a backing object for a child directory with the supplied (relative) // name, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Delete the backing object for the child file or symlink with the given // (relative) name and generation number, where zero means the latest // generation. If the object/generation doesn't exist, no error is returned. // // metaGeneration may be set to a non-nil pointer giving a meta-generation // precondition, but need not be. DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) // Delete the backing object for the child directory with the given // (relative) name. DeleteChildDir( ctx context.Context, name string) (err error) } type dirInode struct { ///////////////////////// // Dependencies ///////////////////////// bucket gcsx.SyncerBucket mtimeClock timeutil.Clock cacheClock timeutil.Clock ///////////////////////// // Constant data ///////////////////////// id fuseops.InodeID implicitDirs bool // INVARIANT: name.IsDir() name Name attrs fuseops.InodeAttributes ///////////////////////// // Mutable state ///////////////////////// // A mutex that must be held when calling certain methods. See documentation // for each method. mu syncutil.InvariantMutex // GUARDED_BY(mu) lc lookupCount // cache.CheckInvariants() does not panic. // // GUARDED_BY(mu) cache typeCache } var _ DirInode = &dirInode{} // Create a directory inode for the name, representing the directory containing // the objects for which it is an immediate prefix. For the root directory, // this is the empty string. // // If implicitDirs is set, LookUpChild will use ListObjects to find child // directories that are "implicitly" defined by the existence of their own // descendents. For example, if there is an object named "foo/bar/baz" and this // is the directory "foo", a child directory named "bar" will be implied. 
// // If typeCacheTTL is non-zero, a cache from child name to information about // whether that name exists as a file/symlink and/or directory will be // maintained. This may speed up calls to LookUpChild, especially when combined // with a stat-caching GCS bucket, but comes at the cost of consistency: if the // child is removed and recreated with a different type before the expiration, // we may fail to find it. // // The initial lookup count is zero. // // REQUIRES: IsDirName(name) func NewDirInode( id fuseops.InodeID, name Name, attrs fuseops.InodeAttributes, implicitDirs bool, typeCacheTTL time.Duration, bucket gcsx.SyncerBucket, mtimeClock timeutil.Clock, cacheClock timeutil.Clock) (d DirInode) { if !name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", name)) } // Set up the struct. const typeCacheCapacity = 1 << 16 typed := &dirInode{ bucket: bucket, mtimeClock: mtimeClock, cacheClock: cacheClock, id: id, implicitDirs: implicitDirs, name: name, attrs: attrs, cache: newTypeCache(typeCacheCapacity/2, typeCacheTTL), } typed.lc.Init(id) // Set up invariant checking. typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants) d = typed return } //////////////////////////////////////////////////////////////////////// // Helpers //////////////////////////////////////////////////////////////////////// func (d *dirInode) checkInvariants() { // INVARIANT: d.name.IsDir() if !d.name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", d.name)) } // cache.CheckInvariants() does not panic. d.cache.CheckInvariants() } func (d *dirInode) lookUpChildFile( ctx context.Context, name string) (result LookUpResult, err error) { result.Bucket = d.Bucket() result.FullName = NewFileName(d.Name(), name) result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return } func (d *dirInode) lookUpChildDir( ctx context.Context, dirName string) (result LookUpResult, err error) { b := syncutil.NewBundle(ctx) childName := NewDirName(d.Name(), dirName) // Stat the placeholder object. b.Add(func(ctx context.Context) (err error) { result.Bucket = d.Bucket() result.FullName = childName result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return }) // If implicit directories are enabled, find out whether the child name is // implicitly defined. if d.implicitDirs { b.Add(func(ctx context.Context) (err error) { result.ImplicitDir, err = objectNamePrefixNonEmpty( ctx, d.bucket, childName.GcsObjectName()) if err != nil { err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err) return } return }) } // Wait for both. err = b.Join() if err != nil { return } return } // Look up the file for a (file, dir) pair with conflicting names, overriding // the default behavior. If the file doesn't exist, return a nil record with a // nil error. If the directory doesn't exist, pretend the file doesn't exist. // // REQUIRES: strings.HasSuffix(name, ConflictingFileNameSuffix) func (d *dirInode) lookUpConflicting( ctx context.Context, name string) (result LookUpResult, err error) { strippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix) // In order to a marked name to be accepted, we require the conflicting // directory to exist. 
var dirResult LookUpResult dirResult, err = d.lookUpChildDir(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildDir for stripped name: %v", err) return } if !dirResult.Exists() { return } // The directory name exists. Find the conflicting file. result, err = d.lookUpChildFile(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildFile for stripped name: %v", err) return } return } // List the supplied object name prefix to find out whether it is non-empty. func objectNamePrefixNonEmpty( ctx context.Context, bucket gcs.Bucket, prefix string) (nonEmpty bool, err error) { req := &gcs.ListObjectsRequest{ Prefix: prefix, MaxResults: 1, } listing, err := bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } nonEmpty = len(listing.Objects) != 0 return } // Stat the object with the given name, returning (nil, nil) if the object // doesn't exist rather than failing. func statObjectMayNotExist( ctx context.Context, bucket gcs.Bucket, name Name) (o *gcs.Object, err error) { // Call the bucket. req := &gcs.StatObjectRequest{ Name: name.GcsObjectName(), } o, err = bucket.StatObject(ctx, req) // Suppress "not found" errors. if _, ok := err.(*gcs.NotFoundError); ok { err = nil } // Annotate others. if err != nil { err = fmt.Errorf("StatObject: %v", err) return } return } // Fail if the name already exists. Pass on errors directly. func (d *dirInode) createNewObject( ctx context.Context, name Name, metadata map[string]string) (o *gcs.Object, err error) { // Create an empty backing object for the child, failing if it already // exists. var precond int64 createReq := &gcs.CreateObjectRequest{ Name: name.GcsObjectName(), Contents: strings.NewReader(""), GenerationPrecondition: &precond, Metadata: metadata, } o, err = d.bucket.CreateObject(ctx, createReq) if err != nil { return } return } // An implementation detail fo filterMissingChildDirs. func filterMissingChildDirNames( ctx context.Context, bucket gcs.Bucket, dirName Name, unfiltered <-chan string, filtered chan<- string) (err error) { for name := range unfiltered { var o *gcs.Object // Stat the placeholder. o, err = statObjectMayNotExist( ctx, bucket, NewDirName(dirName, name), ) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } // Should we pass on this name? if o == nil { continue } select { case <-ctx.Done(): err = ctx.Err() return case filtered <- name: } } return } // Given a list of child names that appear to be directories according to // d.bucket.ListObjects (which always behaves as if implicit directories are // enabled), filter out the ones for which a placeholder object does not // actually exist. If implicit directories are enabled, simply return them all. // // LOCKS_REQUIRED(d) func (d *dirInode) filterMissingChildDirs( ctx context.Context, in []string) (out []string, err error) { // Do we need to do anything? if d.implicitDirs { out = in return } b := syncutil.NewBundle(ctx) // First add any names that we already know are directories according to our // cache, removing them from the input. now := d.cacheClock.Now() var tmp []string for _, name := range in { if d.cache.IsDir(now, name) { out = append(out, name) } else { tmp = append(tmp, name) } } in = tmp // Feed names into a channel. 
unfiltered := make(chan string, 100) b.Add(func(ctx context.Context) (err error) { defer close(unfiltered) for _, name := range in { select { case <-ctx.Done(): err = ctx.Err() return case unfiltered <- name: } } return }) // Stat the placeholder object for each, filtering out placeholders that are // not found. Use some parallelism. const statWorkers = 32 filtered := make(chan string, 100) var wg sync.WaitGroup for i := 0; i < statWorkers; i++ { wg.Add(1) b.Add(func(ctx context.Context) (err error) { defer wg.Done() err = filterMissingChildDirNames( ctx, d.bucket, d.Name(), unfiltered, filtered) return }) } go func() { wg.Wait() close(filtered) }() // Accumulate into a slice. var filteredSlice []string b.Add(func(ctx context.Context) (err error) { for name := range filtered { filteredSlice = append(filteredSlice, name) } return }) // Wait for everything to complete. err = b.Join() // Update the cache with everything we learned. now = d.cacheClock.Now() for _, name := range filteredSlice { d.cache.NoteDir(now, name) } // Return everything we learned. out = append(out, filteredSlice...) return } //////////////////////////////////////////////////////////////////////// // Public interface //////////////////////////////////////////////////////////////////////// func (d *dirInode) Lock() { d.mu.Lock() } func (d *dirInode) Unlock() { d.mu.Unlock() } func (d *dirInode) ID() fuseops.InodeID { return d.id } func (d *dirInode) Name() Name { return d.name } // LOCKS_REQUIRED(d) func (d *dirInode) IncrementLookupCount() { d.lc.Inc() } // LOCKS_REQUIRED(d) func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) { destroy = d.lc.Dec(n) return } // LOCKS_REQUIRED(d) func (d *dirInode) Destroy() (err error) { // Nothing interesting to do. return } // LOCKS_REQUIRED(d) func (d *dirInode) Attributes( ctx context.Context) (attrs fuseops.InodeAttributes, err error) { // Set up basic attributes. attrs = d.attrs attrs.Nlink = 1 return } func (d *dirInode) Bucket() gcsx.SyncerBucket { return d.bucket } // A suffix that can be used to unambiguously tag a file system name. // (Unambiguous because U+000A is not allowed in GCS object names.) This is // used to refer to the file/symlink in a (file/symlink, directory) pair with // conflicting object names. // // See also the notes on DirInode.LookUpChild. const ConflictingFileNameSuffix = "\n" // LOCKS_REQUIRED(d) func (d *dirInode) LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) { // Consult the cache about the type of the child. This may save us work // below. now := d.cacheClock.Now() cacheSaysFile := d.cache.IsFile(now, name) cacheSaysDir := d.cache.IsDir(now, name) // Is this a conflict marker name? if strings.HasSuffix(name, ConflictingFileNameSuffix) { result, err = d.lookUpConflicting(ctx, name) return } // Stat the child as a file, unless the cache has told us it's a directory // but not a file. b := syncutil.NewBundle(ctx) var fileResult LookUpResult if !(cacheSaysDir && !cacheSaysFile) { b.Add(func(ctx context.Context) (err error) { fileResult, err = d.lookUpChildFile(ctx, name) return }) } // Stat the child as a directory, unless the cache has told us it's a file // but not a directory. var dirResult LookUpResult if !(cacheSaysFile && !cacheSaysDir) { b.Add(func(ctx context.Context) (err error) { dirResult, err = d.lookUpChildDir(ctx, name) return }) } // Wait for both. err = b.Join() if err != nil { return } // Prefer directories over files. 
switch { case dirResult.Exists(): result = dirResult case fileResult.Exists(): result = fileResult } // Update the cache. now = d.cacheClock.Now() if fileResult.Exists() { d.cache.NoteFile(now, name) } if dirResult.Exists() { d.cache.NoteDir(now, name) } return } // LOCKS_REQUIRED(d) func (d *dirInode) ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) { // Ask the bucket to list some objects. req := &gcs.ListObjectsRequest{ Delimiter: "/", Prefix: d.Name().GcsObjectName(), ContinuationToken: tok, } listing, err := d.bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } // Convert objects to entries for files or symlinks. for _, o := range listing.Objects { // Skip the entry for the backing object itself, which of course has its // own name as a prefix but which we don't wan to appear to contain itself. if o.Name == d.Name().GcsObjectName() { continue } e := fuseutil.Dirent{ Name: path.Base(o.Name), Type: fuseutil.DT_File, } if IsSymlink(o) { e.Type = fuseutil.DT_Link } entries = append(entries, e) } // Extract directory names from the collapsed runs. var dirNames []string for _, p := range listing.CollapsedRuns { dirNames = append(dirNames, path.Base(p)) } // Filter the directory names according to our implicit directory settings. dirNames, err = d.filterMissingChildDirs(ctx, dirNames) if err != nil { err = fmt.Errorf("filterMissingChildDirs: %v", err) return } // Return entries for directories. for _, name := range dirNames { e := fuseutil.Dirent{ Name: name, Type: fuseutil.DT_Directory, } entries = append(entries, e) } // Return an appropriate continuation token, if any. newTok = listing.ContinuationToken // Update the type cache with everything we learned. now := d.cacheClock.Now() for _, e := range entries { switch e.Type { case fuseutil.DT_File: d.cache.NoteFile(now, e.Name) case fuseutil.DT_Directory: d.cache.NoteDir(now, e.Name) } } return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { metadata := map[string]string{ FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano), } fn = NewFileName(d.Name(), name) o, err = d.createNewObject(ctx, fn, metadata) if err != nil
d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) { // Erase any existing type information for this name. d.cache.Erase(name) fn = NewFileName(d.Name(), name) // Clone over anything that might already exist for the name. o, err = d.bucket.CopyObject( ctx, &gcs.CopyObjectRequest{ SrcName: src.Name, SrcGeneration: src.Generation, SrcMetaGenerationPrecondition: &src.MetaGeneration, DstName: fn.GcsObjectName(), }) if err != nil { return } // Update the type cache. d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) { fn = NewFileName(d.Name(), name) metadata := map[string]string{ SymlinkMetadataKey: target, } o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { fn = NewDirName(d.Name(), name) o, err = d.createNewObject(ctx, fn, nil) if err != nil { return } d.cache.NoteDir(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) { d.cache.Erase(name) childName := NewFileName(d.Name(), name) err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), Generation: generation, MetaGenerationPrecondition: metaGeneration, }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildDir( ctx context.Context, name string) (err error) { d.cache.Erase(name) childName := NewDirName(d.Name(), name) // Delete the backing object. Unfortunately we have no way to precondition // this on the directory being empty. err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return }
{ return }
conditional_block
dir.go
// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package inode import ( "fmt" "path" "strings" "sync" "time" "github.com/simonwahlstrom/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/fuse/fuseutil" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/syncutil" "github.com/jacobsa/timeutil" "golang.org/x/net/context" ) // IsDirName Does the supplied object name represent a directory (as opposed to a file or // symlink)? func IsDirName(name string) bool { return name == "" || name[len(name)-1] == '/' } // The result of looking up a child within a directory inode. See notes on // DirInode.LookUpChild for more info. type LookUpResult struct { // The GCS bucket where the lookup is performed. Bucket gcsx.SyncerBucket // For both object-backed children and implicit directories, the full // canonical name of the child. For example, if the parent inode is "foo/" // and the child is a directory, then this is "foo/bar/". // // Guaranteed to be present only if Exists(). FullName Name // The backing object for the child, if any. If the child is not found or // exists only as an implicit directory, this is nil. Object *gcs.Object // Does the child exist as a directory implicitly defined by its own // descendents? Meaningful only if Object is nil and implicit directories are // enabled for the parent inode. ImplicitDir bool } // Exists returns true iff the result indicates that the child exists, explicitly or // implicitly. func (lr *LookUpResult) Exists() bool { return lr.Object != nil || lr.ImplicitDir } // An inode representing a directory, with facilities for listing entries, // looking up children, and creating and deleting children. Must be locked for // any method additional to the Inode interface. type DirInode interface { BucketOwnedInode // Look up the direct child with the given relative name, returning // information about the object backing the child or whether it exists as an // implicit directory. If a file/symlink and a directory with the given name // both exist, the directory is preferred. Return a result with // !result.Exists() and a nil error if neither is found. // // Special case: if the name ends in ConflictingFileNameSuffix, we strip the // suffix, confirm that a conflicting directory exists, then return a result // for the file/symlink. // // If this inode was created with implicitDirs is set, this method will use // ListObjects to find child directories that are "implicitly" defined by the // existence of their own descendents. For example, if there is an object // named "foo/bar/baz" and this is the directory "foo", a child directory // named "bar" will be implied. In this case, result.ImplicitDir will be // true. LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) // Read some number of entries from the directory, returning a continuation // token that can be used to pick up the read operation where it left off. // Supply the empty token on the first call. 
// // At the end of the directory, the returned continuation token will be // empty. Otherwise it will be non-empty. There is no guarantee about the // number of entries returned; it may be zero even with a non-empty // continuation token. // // The contents of the Offset and Inode fields for returned entries is // undefined. ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) // Create an empty child file with the supplied (relative) name, failing with // *gcs.PreconditionError if a backing object already exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Like CreateChildFile, except clone the supplied source object instead of // creating an empty object. // Return the full name of the child and the GCS object it backs up. CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) // Create a symlink object with the supplied (relative) name and the supplied // target, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) // Create a backing object for a child directory with the supplied (relative) // name, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object it backs up. CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Delete the backing object for the child file or symlink with the given // (relative) name and generation number, where zero means the latest // generation. If the object/generation doesn't exist, no error is returned. // // metaGeneration may be set to a non-nil pointer giving a meta-generation // precondition, but need not be. DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) // Delete the backing object for the child directory with the given // (relative) name. DeleteChildDir( ctx context.Context, name string) (err error) } type dirInode struct { ///////////////////////// // Dependencies ///////////////////////// bucket gcsx.SyncerBucket mtimeClock timeutil.Clock cacheClock timeutil.Clock ///////////////////////// // Constant data ///////////////////////// id fuseops.InodeID implicitDirs bool // INVARIANT: name.IsDir() name Name attrs fuseops.InodeAttributes ///////////////////////// // Mutable state ///////////////////////// // A mutex that must be held when calling certain methods. See documentation // for each method. mu syncutil.InvariantMutex // GUARDED_BY(mu) lc lookupCount // cache.CheckInvariants() does not panic. // // GUARDED_BY(mu) cache typeCache } var _ DirInode = &dirInode{} // Create a directory inode for the name, representing the directory containing // the objects for which it is an immediate prefix. For the root directory, // this is the empty string. // // If implicitDirs is set, LookUpChild will use ListObjects to find child // directories that are "implicitly" defined by the existence of their own // descendents. For example, if there is an object named "foo/bar/baz" and this // is the directory "foo", a child directory named "bar" will be implied. 
// // If typeCacheTTL is non-zero, a cache from child name to information about // whether that name exists as a file/symlink and/or directory will be // maintained. This may speed up calls to LookUpChild, especially when combined // with a stat-caching GCS bucket, but comes at the cost of consistency: if the // child is removed and recreated with a different type before the expiration, // we may fail to find it. // // The initial lookup count is zero. // // REQUIRES: IsDirName(name) func NewDirInode( id fuseops.InodeID, name Name, attrs fuseops.InodeAttributes, implicitDirs bool, typeCacheTTL time.Duration, bucket gcsx.SyncerBucket, mtimeClock timeutil.Clock, cacheClock timeutil.Clock) (d DirInode) { if !name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", name)) } // Set up the struct. const typeCacheCapacity = 1 << 16 typed := &dirInode{ bucket: bucket, mtimeClock: mtimeClock, cacheClock: cacheClock, id: id, implicitDirs: implicitDirs, name: name, attrs: attrs, cache: newTypeCache(typeCacheCapacity/2, typeCacheTTL), } typed.lc.Init(id) // Set up invariant checking. typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants) d = typed return } //////////////////////////////////////////////////////////////////////// // Helpers //////////////////////////////////////////////////////////////////////// func (d *dirInode) checkInvariants() { // INVARIANT: d.name.IsDir() if !d.name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", d.name)) } // cache.CheckInvariants() does not panic. d.cache.CheckInvariants() } func (d *dirInode) lookUpChildFile( ctx context.Context, name string) (result LookUpResult, err error) { result.Bucket = d.Bucket() result.FullName = NewFileName(d.Name(), name) result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return } func (d *dirInode) lookUpChildDir( ctx context.Context, dirName string) (result LookUpResult, err error) { b := syncutil.NewBundle(ctx) childName := NewDirName(d.Name(), dirName) // Stat the placeholder object. b.Add(func(ctx context.Context) (err error) { result.Bucket = d.Bucket() result.FullName = childName result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return }) // If implicit directories are enabled, find out whether the child name is // implicitly defined. if d.implicitDirs { b.Add(func(ctx context.Context) (err error) { result.ImplicitDir, err = objectNamePrefixNonEmpty( ctx, d.bucket, childName.GcsObjectName()) if err != nil { err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err) return } return }) } // Wait for both. err = b.Join() if err != nil { return } return } // Look up the file for a (file, dir) pair with conflicting names, overriding // the default behavior. If the file doesn't exist, return a nil record with a // nil error. If the directory doesn't exist, pretend the file doesn't exist. // // REQUIRES: strings.HasSuffix(name, ConflictingFileNameSuffix) func (d *dirInode) lookUpConflicting( ctx context.Context, name string) (result LookUpResult, err error) { strippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix) // In order to a marked name to be accepted, we require the conflicting // directory to exist. 
var dirResult LookUpResult dirResult, err = d.lookUpChildDir(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildDir for stripped name: %v", err) return } if !dirResult.Exists() { return } // The directory name exists. Find the conflicting file. result, err = d.lookUpChildFile(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildFile for stripped name: %v", err) return } return } // List the supplied object name prefix to find out whether it is non-empty. func objectNamePrefixNonEmpty( ctx context.Context, bucket gcs.Bucket, prefix string) (nonEmpty bool, err error) { req := &gcs.ListObjectsRequest{ Prefix: prefix, MaxResults: 1, } listing, err := bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } nonEmpty = len(listing.Objects) != 0 return } // Stat the object with the given name, returning (nil, nil) if the object // doesn't exist rather than failing. func statObjectMayNotExist( ctx context.Context, bucket gcs.Bucket, name Name) (o *gcs.Object, err error) { // Call the bucket. req := &gcs.StatObjectRequest{ Name: name.GcsObjectName(), } o, err = bucket.StatObject(ctx, req) // Suppress "not found" errors. if _, ok := err.(*gcs.NotFoundError); ok { err = nil } // Annotate others. if err != nil { err = fmt.Errorf("StatObject: %v", err) return } return } // Fail if the name already exists. Pass on errors directly. func (d *dirInode) createNewObject( ctx context.Context, name Name, metadata map[string]string) (o *gcs.Object, err error) { // Create an empty backing object for the child, failing if it already // exists. var precond int64 createReq := &gcs.CreateObjectRequest{ Name: name.GcsObjectName(), Contents: strings.NewReader(""), GenerationPrecondition: &precond, Metadata: metadata, } o, err = d.bucket.CreateObject(ctx, createReq) if err != nil { return } return } // An implementation detail fo filterMissingChildDirs. func filterMissingChildDirNames( ctx context.Context, bucket gcs.Bucket, dirName Name, unfiltered <-chan string, filtered chan<- string) (err error) { for name := range unfiltered { var o *gcs.Object // Stat the placeholder. o, err = statObjectMayNotExist( ctx, bucket, NewDirName(dirName, name), ) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } // Should we pass on this name? if o == nil { continue } select { case <-ctx.Done(): err = ctx.Err() return case filtered <- name: } } return } // Given a list of child names that appear to be directories according to // d.bucket.ListObjects (which always behaves as if implicit directories are // enabled), filter out the ones for which a placeholder object does not // actually exist. If implicit directories are enabled, simply return them all. // // LOCKS_REQUIRED(d) func (d *dirInode) filterMissingChildDirs( ctx context.Context, in []string) (out []string, err error) { // Do we need to do anything? if d.implicitDirs { out = in return } b := syncutil.NewBundle(ctx) // First add any names that we already know are directories according to our // cache, removing them from the input. now := d.cacheClock.Now() var tmp []string for _, name := range in { if d.cache.IsDir(now, name) { out = append(out, name) } else { tmp = append(tmp, name) } } in = tmp // Feed names into a channel. 
unfiltered := make(chan string, 100) b.Add(func(ctx context.Context) (err error) { defer close(unfiltered) for _, name := range in { select { case <-ctx.Done(): err = ctx.Err() return case unfiltered <- name: } } return }) // Stat the placeholder object for each, filtering out placeholders that are // not found. Use some parallelism. const statWorkers = 32 filtered := make(chan string, 100) var wg sync.WaitGroup for i := 0; i < statWorkers; i++ { wg.Add(1) b.Add(func(ctx context.Context) (err error) { defer wg.Done() err = filterMissingChildDirNames( ctx, d.bucket, d.Name(), unfiltered, filtered) return }) } go func() { wg.Wait() close(filtered) }() // Accumulate into a slice. var filteredSlice []string b.Add(func(ctx context.Context) (err error) { for name := range filtered { filteredSlice = append(filteredSlice, name) } return }) // Wait for everything to complete. err = b.Join() // Update the cache with everything we learned. now = d.cacheClock.Now() for _, name := range filteredSlice { d.cache.NoteDir(now, name) } // Return everything we learned. out = append(out, filteredSlice...) return } //////////////////////////////////////////////////////////////////////// // Public interface //////////////////////////////////////////////////////////////////////// func (d *dirInode) Lock() { d.mu.Lock() } func (d *dirInode) Unlock() { d.mu.Unlock() } func (d *dirInode) ID() fuseops.InodeID { return d.id } func (d *dirInode) Name() Name { return d.name } // LOCKS_REQUIRED(d) func (d *dirInode) IncrementLookupCount() { d.lc.Inc() } // LOCKS_REQUIRED(d) func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) { destroy = d.lc.Dec(n) return } // LOCKS_REQUIRED(d) func (d *dirInode) Destroy() (err error) { // Nothing interesting to do. return } // LOCKS_REQUIRED(d) func (d *dirInode) Attributes( ctx context.Context) (attrs fuseops.InodeAttributes, err error) { // Set up basic attributes. attrs = d.attrs attrs.Nlink = 1 return } func (d *dirInode) Bucket() gcsx.SyncerBucket { return d.bucket } // A suffix that can be used to unambiguously tag a file system name. // (Unambiguous because U+000A is not allowed in GCS object names.) This is // used to refer to the file/symlink in a (file/symlink, directory) pair with // conflicting object names. // // See also the notes on DirInode.LookUpChild. const ConflictingFileNameSuffix = "\n" // LOCKS_REQUIRED(d) func (d *dirInode) LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) { // Consult the cache about the type of the child. This may save us work // below. now := d.cacheClock.Now() cacheSaysFile := d.cache.IsFile(now, name) cacheSaysDir := d.cache.IsDir(now, name) // Is this a conflict marker name? if strings.HasSuffix(name, ConflictingFileNameSuffix) { result, err = d.lookUpConflicting(ctx, name) return } // Stat the child as a file, unless the cache has told us it's a directory // but not a file. b := syncutil.NewBundle(ctx) var fileResult LookUpResult if !(cacheSaysDir && !cacheSaysFile) { b.Add(func(ctx context.Context) (err error) { fileResult, err = d.lookUpChildFile(ctx, name) return }) } // Stat the child as a directory, unless the cache has told us it's a file // but not a directory. var dirResult LookUpResult if !(cacheSaysFile && !cacheSaysDir) { b.Add(func(ctx context.Context) (err error) { dirResult, err = d.lookUpChildDir(ctx, name) return }) } // Wait for both. err = b.Join() if err != nil { return } // Prefer directories over files. 
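// A single name may be backed by both a file object ("foo") and a directory placeholder ("foo/"). In that case the directory wins here; the file remains reachable under the conflict marker name (the name plus ConflictingFileNameSuffix), which is handled by lookUpConflicting above.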
switch { case dirResult.Exists(): result = dirResult case fileResult.Exists(): result = fileResult } // Update the cache. now = d.cacheClock.Now() if fileResult.Exists() { d.cache.NoteFile(now, name) } if dirResult.Exists() { d.cache.NoteDir(now, name) } return } // LOCKS_REQUIRED(d) func (d *dirInode) ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) { // Ask the bucket to list some objects. req := &gcs.ListObjectsRequest{ Delimiter: "/", Prefix: d.Name().GcsObjectName(), ContinuationToken: tok, } listing, err := d.bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } // Convert objects to entries for files or symlinks. for _, o := range listing.Objects { // Skip the entry for the backing object itself, which of course has its // own name as a prefix but which we don't want to appear to contain itself. if o.Name == d.Name().GcsObjectName() { continue } e := fuseutil.Dirent{ Name: path.Base(o.Name), Type: fuseutil.DT_File, } if IsSymlink(o) { e.Type = fuseutil.DT_Link } entries = append(entries, e) } // Extract directory names from the collapsed runs. var dirNames []string for _, p := range listing.CollapsedRuns { dirNames = append(dirNames, path.Base(p)) } // Filter the directory names according to our implicit directory settings. dirNames, err = d.filterMissingChildDirs(ctx, dirNames) if err != nil { err = fmt.Errorf("filterMissingChildDirs: %v", err) return } // Return entries for directories. for _, name := range dirNames { e := fuseutil.Dirent{ Name: name, Type: fuseutil.DT_Directory, } entries = append(entries, e) } // Return an appropriate continuation token, if any. newTok = listing.ContinuationToken // Update the type cache with everything we learned. now := d.cacheClock.Now() for _, e := range entries { switch e.Type { case fuseutil.DT_File: d.cache.NoteFile(now, e.Name) case fuseutil.DT_Directory: d.cache.NoteDir(now, e.Name) } } return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { metadata := map[string]string{ FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano), } fn = NewFileName(d.Name(), name) o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) { // Erase any existing type information for this name. d.cache.Erase(name) fn = NewFileName(d.Name(), name) // Clone over anything that might already exist for the name. o, err = d.bucket.CopyObject( ctx, &gcs.CopyObjectRequest{ SrcName: src.Name, SrcGeneration: src.Generation, SrcMetaGenerationPrecondition: &src.MetaGeneration, DstName: fn.GcsObjectName(), }) if err != nil { return } // Update the type cache. d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error)
// LOCKS_REQUIRED(d) func (d *dirInode) CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { fn = NewDirName(d.Name(), name) o, err = d.createNewObject(ctx, fn, nil) if err != nil { return } d.cache.NoteDir(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) { d.cache.Erase(name) childName := NewFileName(d.Name(), name) err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), Generation: generation, MetaGenerationPrecondition: metaGeneration, }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildDir( ctx context.Context, name string) (err error) { d.cache.Erase(name) childName := NewDirName(d.Name(), name) // Delete the backing object. Unfortunately we have no way to precondition // this on the directory being empty. err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return }
{ fn = NewFileName(d.Name(), name) metadata := map[string]string{ SymlinkMetadataKey: target, } o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return }
identifier_body
dir.go
// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package inode import ( "fmt" "path" "strings" "sync" "time" "github.com/simonwahlstrom/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/fuse/fuseutil" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/syncutil" "github.com/jacobsa/timeutil" "golang.org/x/net/context" ) // IsDirName reports whether the supplied object name represents a directory // (as opposed to a file or symlink). func IsDirName(name string) bool { return name == "" || name[len(name)-1] == '/' } // The result of looking up a child within a directory inode. See notes on // DirInode.LookUpChild for more info. type LookUpResult struct { // The GCS bucket where the lookup is performed. Bucket gcsx.SyncerBucket // For both object-backed children and implicit directories, the full // canonical name of the child. For example, if the parent inode is "foo/" // and the child is a directory, then this is "foo/bar/". // // Guaranteed to be present only if Exists(). FullName Name // The backing object for the child, if any. If the child is not found or // exists only as an implicit directory, this is nil. Object *gcs.Object // Does the child exist as a directory implicitly defined by its own // descendents? Meaningful only if Object is nil and implicit directories are // enabled for the parent inode. ImplicitDir bool } // Exists returns true iff the result indicates that the child exists, explicitly or // implicitly. func (lr *LookUpResult) Exists() bool { return lr.Object != nil || lr.ImplicitDir } // An inode representing a directory, with facilities for listing entries, // looking up children, and creating and deleting children. Must be locked for // any method in addition to those of the Inode interface. type DirInode interface { BucketOwnedInode // Look up the direct child with the given relative name, returning // information about the object backing the child or whether it exists as an // implicit directory. If a file/symlink and a directory with the given name // both exist, the directory is preferred. Return a result with // !result.Exists() and a nil error if neither is found. // // Special case: if the name ends in ConflictingFileNameSuffix, we strip the // suffix, confirm that a conflicting directory exists, then return a result // for the file/symlink. // // If this inode was created with implicitDirs set, this method will use // ListObjects to find child directories that are "implicitly" defined by the // existence of their own descendents. For example, if there is an object // named "foo/bar/baz" and this is the directory "foo", a child directory // named "bar" will be implied. In this case, result.ImplicitDir will be // true. LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) // Read some number of entries from the directory, returning a continuation // token that can be used to pick up the read operation where it left off. // Supply the empty token on the first call.
// // At the end of the directory, the returned continuation token will be // empty. Otherwise it will be non-empty. There is no guarantee about the // number of entries returned; it may be zero even with a non-empty // continuation token. // // The contents of the Offset and Inode fields for returned entries are // undefined. ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) // Create an empty child file with the supplied (relative) name, failing with // *gcs.PreconditionError if a backing object already exists in GCS. // Return the full name of the child and the GCS object backing it. CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Like CreateChildFile, except clone the supplied source object instead of // creating an empty object. // Return the full name of the child and the GCS object backing it. CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) // Create a symlink object with the supplied (relative) name and the supplied // target, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object backing it. CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) // Create a backing object for a child directory with the supplied (relative) // name, failing with *gcs.PreconditionError if a backing object already // exists in GCS. // Return the full name of the child and the GCS object backing it. CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) // Delete the backing object for the child file or symlink with the given // (relative) name and generation number, where zero means the latest // generation. If the object/generation doesn't exist, no error is returned. // // metaGeneration may be set to a non-nil pointer giving a meta-generation // precondition, but need not be. DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) // Delete the backing object for the child directory with the given // (relative) name. DeleteChildDir( ctx context.Context, name string) (err error) } type dirInode struct { ///////////////////////// // Dependencies ///////////////////////// bucket gcsx.SyncerBucket mtimeClock timeutil.Clock cacheClock timeutil.Clock ///////////////////////// // Constant data ///////////////////////// id fuseops.InodeID implicitDirs bool // INVARIANT: name.IsDir() name Name attrs fuseops.InodeAttributes ///////////////////////// // Mutable state ///////////////////////// // A mutex that must be held when calling certain methods. See documentation // for each method. mu syncutil.InvariantMutex // GUARDED_BY(mu) lc lookupCount // cache.CheckInvariants() does not panic. // // GUARDED_BY(mu) cache typeCache } var _ DirInode = &dirInode{} // Create a directory inode for the name, representing the directory containing // the objects for which it is an immediate prefix. For the root directory, // this is the empty string. // // If implicitDirs is set, LookUpChild will use ListObjects to find child // directories that are "implicitly" defined by the existence of their own // descendents. For example, if there is an object named "foo/bar/baz" and this // is the directory "foo", a child directory named "bar" will be implied.
// // If typeCacheTTL is non-zero, a cache from child name to information about // whether that name exists as a file/symlink and/or directory will be // maintained. This may speed up calls to LookUpChild, especially when combined // with a stat-caching GCS bucket, but comes at the cost of consistency: if the // child is removed and recreated with a different type before the expiration, // we may fail to find it. // // The initial lookup count is zero. // // REQUIRES: IsDirName(name) func NewDirInode( id fuseops.InodeID, name Name, attrs fuseops.InodeAttributes, implicitDirs bool, typeCacheTTL time.Duration, bucket gcsx.SyncerBucket, mtimeClock timeutil.Clock, cacheClock timeutil.Clock) (d DirInode) { if !name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", name)) } // Set up the struct. const typeCacheCapacity = 1 << 16 typed := &dirInode{ bucket: bucket, mtimeClock: mtimeClock, cacheClock: cacheClock, id: id, implicitDirs: implicitDirs, name: name, attrs: attrs, cache: newTypeCache(typeCacheCapacity/2, typeCacheTTL), } typed.lc.Init(id) // Set up invariant checking. typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants) d = typed return } //////////////////////////////////////////////////////////////////////// // Helpers //////////////////////////////////////////////////////////////////////// func (d *dirInode) checkInvariants() { // INVARIANT: d.name.IsDir() if !d.name.IsDir() { panic(fmt.Sprintf("Unexpected name: %s", d.name)) } // cache.CheckInvariants() does not panic. d.cache.CheckInvariants() } func (d *dirInode) lookUpChildFile( ctx context.Context, name string) (result LookUpResult, err error) { result.Bucket = d.Bucket() result.FullName = NewFileName(d.Name(), name) result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return } func (d *dirInode) lookUpChildDir( ctx context.Context, dirName string) (result LookUpResult, err error) { b := syncutil.NewBundle(ctx) childName := NewDirName(d.Name(), dirName) // Stat the placeholder object. b.Add(func(ctx context.Context) (err error) { result.Bucket = d.Bucket() result.FullName = childName result.Object, err = statObjectMayNotExist(ctx, d.bucket, result.FullName) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } return }) // If implicit directories are enabled, find out whether the child name is // implicitly defined. if d.implicitDirs { b.Add(func(ctx context.Context) (err error) { result.ImplicitDir, err = objectNamePrefixNonEmpty( ctx, d.bucket, childName.GcsObjectName()) if err != nil { err = fmt.Errorf("objectNamePrefixNonEmpty: %v", err) return } return }) } // Wait for both. err = b.Join() if err != nil { return } return } // Look up the file for a (file, dir) pair with conflicting names, overriding // the default behavior. If the file doesn't exist, return a nil record with a // nil error. If the directory doesn't exist, pretend the file doesn't exist. // // REQUIRES: strings.HasSuffix(name, ConflictingFileNameSuffix) func (d *dirInode) lookUpConflicting( ctx context.Context, name string) (result LookUpResult, err error) { strippedName := strings.TrimSuffix(name, ConflictingFileNameSuffix) // In order for a marked name to be accepted, we require the conflicting // directory to exist.
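// Note that both lookups below use the stripped name: the conflict marker suffix exists only in the file system namespace, never in GCS object names.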
var dirResult LookUpResult dirResult, err = d.lookUpChildDir(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildDir for stripped name: %v", err) return } if !dirResult.Exists() { return } // The directory name exists. Find the conflicting file. result, err = d.lookUpChildFile(ctx, strippedName) if err != nil { err = fmt.Errorf("lookUpChildFile for stripped name: %v", err) return } return } // List the supplied object name prefix to find out whether it is non-empty. func objectNamePrefixNonEmpty( ctx context.Context, bucket gcs.Bucket, prefix string) (nonEmpty bool, err error) { req := &gcs.ListObjectsRequest{ Prefix: prefix, MaxResults: 1, } listing, err := bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } nonEmpty = len(listing.Objects) != 0 return } // Stat the object with the given name, returning (nil, nil) if the object // doesn't exist rather than failing. func
( ctx context.Context, bucket gcs.Bucket, name Name) (o *gcs.Object, err error) { // Call the bucket. req := &gcs.StatObjectRequest{ Name: name.GcsObjectName(), } o, err = bucket.StatObject(ctx, req) // Suppress "not found" errors. if _, ok := err.(*gcs.NotFoundError); ok { err = nil } // Annotate others. if err != nil { err = fmt.Errorf("StatObject: %v", err) return } return } // Fail if the name already exists. Pass on errors directly. func (d *dirInode) createNewObject( ctx context.Context, name Name, metadata map[string]string) (o *gcs.Object, err error) { // Create an empty backing object for the child, failing if it already // exists. var precond int64 createReq := &gcs.CreateObjectRequest{ Name: name.GcsObjectName(), Contents: strings.NewReader(""), GenerationPrecondition: &precond, Metadata: metadata, } o, err = d.bucket.CreateObject(ctx, createReq) if err != nil { return } return } // An implementation detail of filterMissingChildDirs. func filterMissingChildDirNames( ctx context.Context, bucket gcs.Bucket, dirName Name, unfiltered <-chan string, filtered chan<- string) (err error) { for name := range unfiltered { var o *gcs.Object // Stat the placeholder. o, err = statObjectMayNotExist( ctx, bucket, NewDirName(dirName, name), ) if err != nil { err = fmt.Errorf("statObjectMayNotExist: %v", err) return } // Should we pass on this name? if o == nil { continue } select { case <-ctx.Done(): err = ctx.Err() return case filtered <- name: } } return } // Given a list of child names that appear to be directories according to // d.bucket.ListObjects (which always behaves as if implicit directories are // enabled), filter out the ones for which a placeholder object does not // actually exist. If implicit directories are enabled, simply return them all. // // LOCKS_REQUIRED(d) func (d *dirInode) filterMissingChildDirs( ctx context.Context, in []string) (out []string, err error) { // Do we need to do anything? if d.implicitDirs { out = in return } b := syncutil.NewBundle(ctx) // First add any names that we already know are directories according to our // cache, removing them from the input. now := d.cacheClock.Now() var tmp []string for _, name := range in { if d.cache.IsDir(now, name) { out = append(out, name) } else { tmp = append(tmp, name) } } in = tmp // Feed names into a channel. unfiltered := make(chan string, 100) b.Add(func(ctx context.Context) (err error) { defer close(unfiltered) for _, name := range in { select { case <-ctx.Done(): err = ctx.Err() return case unfiltered <- name: } } return }) // Stat the placeholder object for each, filtering out placeholders that are // not found. Use some parallelism. const statWorkers = 32 filtered := make(chan string, 100) var wg sync.WaitGroup for i := 0; i < statWorkers; i++ { wg.Add(1) b.Add(func(ctx context.Context) (err error) { defer wg.Done() err = filterMissingChildDirNames( ctx, d.bucket, d.Name(), unfiltered, filtered) return }) } go func() { wg.Wait() close(filtered) }() // Accumulate into a slice. var filteredSlice []string b.Add(func(ctx context.Context) (err error) { for name := range filtered { filteredSlice = append(filteredSlice, name) } return }) // Wait for everything to complete. err = b.Join() // Update the cache with everything we learned. now = d.cacheClock.Now() for _, name := range filteredSlice { d.cache.NoteDir(now, name) } // Return everything we learned. out = append(out, filteredSlice...)
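// out already holds the names that were known to the cache up front, so the freshly verified names land after them.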
return } //////////////////////////////////////////////////////////////////////// // Public interface //////////////////////////////////////////////////////////////////////// func (d *dirInode) Lock() { d.mu.Lock() } func (d *dirInode) Unlock() { d.mu.Unlock() } func (d *dirInode) ID() fuseops.InodeID { return d.id } func (d *dirInode) Name() Name { return d.name } // LOCKS_REQUIRED(d) func (d *dirInode) IncrementLookupCount() { d.lc.Inc() } // LOCKS_REQUIRED(d) func (d *dirInode) DecrementLookupCount(n uint64) (destroy bool) { destroy = d.lc.Dec(n) return } // LOCKS_REQUIRED(d) func (d *dirInode) Destroy() (err error) { // Nothing interesting to do. return } // LOCKS_REQUIRED(d) func (d *dirInode) Attributes( ctx context.Context) (attrs fuseops.InodeAttributes, err error) { // Set up basic attributes. attrs = d.attrs attrs.Nlink = 1 return } func (d *dirInode) Bucket() gcsx.SyncerBucket { return d.bucket } // A suffix that can be used to unambiguously tag a file system name. // (Unambiguous because U+000A is not allowed in GCS object names.) This is // used to refer to the file/symlink in a (file/symlink, directory) pair with // conflicting object names. // // See also the notes on DirInode.LookUpChild. const ConflictingFileNameSuffix = "\n" // LOCKS_REQUIRED(d) func (d *dirInode) LookUpChild( ctx context.Context, name string) (result LookUpResult, err error) { // Consult the cache about the type of the child. This may save us work // below. now := d.cacheClock.Now() cacheSaysFile := d.cache.IsFile(now, name) cacheSaysDir := d.cache.IsDir(now, name) // Is this a conflict marker name? if strings.HasSuffix(name, ConflictingFileNameSuffix) { result, err = d.lookUpConflicting(ctx, name) return } // Stat the child as a file, unless the cache has told us it's a directory // but not a file. b := syncutil.NewBundle(ctx) var fileResult LookUpResult if !(cacheSaysDir && !cacheSaysFile) { b.Add(func(ctx context.Context) (err error) { fileResult, err = d.lookUpChildFile(ctx, name) return }) } // Stat the child as a directory, unless the cache has told us it's a file // but not a directory. var dirResult LookUpResult if !(cacheSaysFile && !cacheSaysDir) { b.Add(func(ctx context.Context) (err error) { dirResult, err = d.lookUpChildDir(ctx, name) return }) } // Wait for both. err = b.Join() if err != nil { return } // Prefer directories over files. switch { case dirResult.Exists(): result = dirResult case fileResult.Exists(): result = fileResult } // Update the cache. now = d.cacheClock.Now() if fileResult.Exists() { d.cache.NoteFile(now, name) } if dirResult.Exists() { d.cache.NoteDir(now, name) } return } // LOCKS_REQUIRED(d) func (d *dirInode) ReadEntries( ctx context.Context, tok string) (entries []fuseutil.Dirent, newTok string, err error) { // Ask the bucket to list some objects. req := &gcs.ListObjectsRequest{ Delimiter: "/", Prefix: d.Name().GcsObjectName(), ContinuationToken: tok, } listing, err := d.bucket.ListObjects(ctx, req) if err != nil { err = fmt.Errorf("ListObjects: %v", err) return } // Convert objects to entries for files or symlinks. for _, o := range listing.Objects { // Skip the entry for the backing object itself, which of course has its // own name as a prefix but which we don't want to appear to contain itself. if o.Name == d.Name().GcsObjectName() { continue } e := fuseutil.Dirent{ Name: path.Base(o.Name), Type: fuseutil.DT_File, } if IsSymlink(o) { e.Type = fuseutil.DT_Link } entries = append(entries, e) } // Extract directory names from the collapsed runs.
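// listing.CollapsedRuns contains prefixes such as "foo/bar/"; path.Base drops the trailing slash and yields the child name "bar".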
var dirNames []string for _, p := range listing.CollapsedRuns { dirNames = append(dirNames, path.Base(p)) } // Filter the directory names according to our implicit directory settings. dirNames, err = d.filterMissingChildDirs(ctx, dirNames) if err != nil { err = fmt.Errorf("filterMissingChildDirs: %v", err) return } // Return entries for directories. for _, name := range dirNames { e := fuseutil.Dirent{ Name: name, Type: fuseutil.DT_Directory, } entries = append(entries, e) } // Return an appropriate continuation token, if any. newTok = listing.ContinuationToken // Update the type cache with everything we learned. now := d.cacheClock.Now() for _, e := range entries { switch e.Type { case fuseutil.DT_File: d.cache.NoteFile(now, e.Name) case fuseutil.DT_Directory: d.cache.NoteDir(now, e.Name) } } return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildFile( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { metadata := map[string]string{ FileMtimeMetadataKey: d.mtimeClock.Now().UTC().Format(time.RFC3339Nano), } fn = NewFileName(d.Name(), name) o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CloneToChildFile( ctx context.Context, name string, src *gcs.Object) (fn Name, o *gcs.Object, err error) { // Erase any existing type information for this name. d.cache.Erase(name) fn = NewFileName(d.Name(), name) // Clone over anything that might already exist for the name. o, err = d.bucket.CopyObject( ctx, &gcs.CopyObjectRequest{ SrcName: src.Name, SrcGeneration: src.Generation, SrcMetaGenerationPrecondition: &src.MetaGeneration, DstName: fn.GcsObjectName(), }) if err != nil { return } // Update the type cache. d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildSymlink( ctx context.Context, name string, target string) (fn Name, o *gcs.Object, err error) { fn = NewFileName(d.Name(), name) metadata := map[string]string{ SymlinkMetadataKey: target, } o, err = d.createNewObject(ctx, fn, metadata) if err != nil { return } d.cache.NoteFile(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) CreateChildDir( ctx context.Context, name string) (fn Name, o *gcs.Object, err error) { fn = NewDirName(d.Name(), name) o, err = d.createNewObject(ctx, fn, nil) if err != nil { return } d.cache.NoteDir(d.cacheClock.Now(), name) return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildFile( ctx context.Context, name string, generation int64, metaGeneration *int64) (err error) { d.cache.Erase(name) childName := NewFileName(d.Name(), name) err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), Generation: generation, MetaGenerationPrecondition: metaGeneration, }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return } // LOCKS_REQUIRED(d) func (d *dirInode) DeleteChildDir( ctx context.Context, name string) (err error) { d.cache.Erase(name) childName := NewDirName(d.Name(), name) // Delete the backing object. Unfortunately we have no way to precondition // this on the directory being empty. err = d.bucket.DeleteObject( ctx, &gcs.DeleteObjectRequest{ Name: childName.GcsObjectName(), }) if err != nil { err = fmt.Errorf("DeleteObject: %v", err) return } return }
statObjectMayNotExist
identifier_name
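The statObjectMayNotExist helper in the sample above follows a common Go pattern: treat a typed "not found" error as a nil result, and annotate every other error with its source. The sketch below is a minimal, self-contained rendering of that pattern; the fetch function and the NotFoundError type are hypothetical stand-ins for the GCS client, not part of the source above.

package main

import "fmt"

// NotFoundError is a hypothetical stand-in for gcs.NotFoundError.
type NotFoundError struct{ Name string }

func (e *NotFoundError) Error() string { return e.Name + " not found" }

// fetch is a hypothetical lookup that never finds anything.
func fetch(name string) (string, error) {
	return "", &NotFoundError{Name: name}
}

// fetchMayNotExist suppresses "not found" errors, returning ("", nil)
// instead, and annotates any other error with its source.
func fetchMayNotExist(name string) (val string, err error) {
	val, err = fetch(name)
	if _, ok := err.(*NotFoundError); ok {
		err = nil
	}
	if err != nil {
		err = fmt.Errorf("fetch: %v", err)
	}
	return
}

func main() {
	val, err := fetchMayNotExist("foo")
	fmt.Printf("val=%q err=%v\n", val, err) // val="" err=<nil>
}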
forwarder_test.go
// Copyright (c) 2015 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package forward import ( "bytes" "context" json2 "encoding/json" "errors" "sync" "testing" "time" events "github.com/temporalio/ringpop-go/events/test/mocks" "github.com/temporalio/ringpop-go/test/thrift/pingpong" athrift "github.com/apache/thrift/lib/go/thrift" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/temporalio/tchannel-go" "github.com/temporalio/tchannel-go/json" "github.com/temporalio/tchannel-go/thrift" ) type ContextKey string type ForwarderTestSuite struct { suite.Suite sender *MockSender forwarder *Forwarder channel *tchannel.Channel peer *tchannel.Channel } type Ping struct { Message string `json:"message"` } func (p Ping) Bytes() []byte { data, _ := json2.Marshal(p) return data } type Pong struct { Message string `json:"message"` From string `json:"from"` Headers map[string]string } func (s *ForwarderTestSuite) registerPong(address string, channel *tchannel.Channel) { hmap := map[string]interface{}{ "/ping": func(ctx json.Context, ping *Ping) (*Pong, error) { return &Pong{"Hello, world!", address, ctx.Headers()}, nil }, "/error": func(ctx json.Context, ping *Ping) (*Pong, error) { return nil, errors.New("remote error") }, } s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {})) thriftHandler := &pingpong.MockTChanPingPong{} // successful request with context thriftHandler.On("Ping", mock.MatchedBy( func(c thrift.Context) bool { return true }), &pingpong.Ping{ Key: "ctxTest", }).Return(&pingpong.Pong{ Source: address, }, nil) // successful request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "success", }).Return(&pingpong.Pong{ Source: address, }, nil) // error request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "error", }).Return(nil, &pingpong.PingError{}) server := thrift.NewServer(channel) server.Register(pingpong.NewTChanPingPongServer(thriftHandler)) } func (s *ForwarderTestSuite) SetupSuite() { channel, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.channel = channel peer, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.registerPong("correct pinging host", peer) s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen") sender := &MockSender{} sender.On("Lookup", "me").Return("192.0.2.1:1", 
nil) sender.On("WhoAmI").Return("192.0.2.1:1", nil) // Processes cannot listen on port 0, so it is safe to assume that dialing this address fails immediately, preventing the timeout path from kicking in. sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil) sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil) sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil) sender.On("Lookup", "error").Return("", errors.New("lookup error")) s.sender = sender s.peer = peer s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder")) } func (s *ForwarderTestSuite) TearDownSuite()
func (s *ForwarderTestSuite) TestForwardJSON() { var ping Ping var pong Pong dest, err := s.sender.Lookup("reachable") s.NoError(err) headerBytes := []byte(`{"hdr1": "val1"}`) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{Headers: headerBytes}) s.NoError(err, "expected request to be forwarded") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers) } func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"}, tchannel.JSON, nil) s.EqualError(err, "remote error") } func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{ 100 * time.Millisecond, }, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestForwardThrift() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "success", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "ctxTest", }, } bytes1, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") k := ContextKey("key") ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val")) res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, &Options{ Ctx: ctx, }) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "error", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.NotNil(response.PingError, "expected a pingerror") } func (s *ForwarderTestSuite) TestMaxRetries() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"}, tchannel.JSON, 
&Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestLookupErrorInRetry() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) // lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestKeysDiverged() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) // no keys should result in a destinations length of 0 during retry, causing the request to be aborted _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestRequestTimesOut() { var ping Ping dest, err := s.sender.Lookup("unreachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{ // By providing a negative timeout the context will immediately return with // a DeadlineExceeded error Timeout: -1, }) s.EqualError(err, "request timed out") } func (s *ForwarderTestSuite) TestRequestRerouted() { var ping Ping var pong Pong dest, err := s.sender.Lookup("immediate fail") s.NoError(err) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RerouteRetries: true, RetrySchedule: []time.Duration{time.Millisecond}, }) s.NoError(err, "expected request to be rerouted") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) } func (s *ForwarderTestSuite) TestRequestNoReroutes() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestInvalidInflightDecrement() { var wg sync.WaitGroup wg.Add(1) listener := &events.EventListener{} listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) { wg.Done() }).Return() s.forwarder.inflight = 0 s.forwarder.AddListener(listener) defer s.forwarder.RemoveListener(listener) s.forwarder.decrementInflight() s.Assertions.Equal(int64(0), s.forwarder.inflight, "Expected inflight to stay at 0 when decremented at 0") // wait for HandleEvent with forward.InflightRequestsMiscountEvent being called wg.Wait() } func TestForwarderTestSuite(t *testing.T) { suite.Run(t, new(ForwarderTestSuite)) } func TestSetForwardedHeader(t *testing.T) { // empty keys array test ctx, _ := thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, nil) assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer") // preserve existing headers ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key",
}) ctx = SetForwardedHeader(ctx, []string{"foo"}) assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding") assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist") // multiple keys encoded in the header ctx, _ = thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, []string{"key1", "key2"}) assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded") } func TestDeleteForwardedHeader(t *testing.T) { ctx, _ := thrift.NewContext(0 * time.Second) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set") } ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set in the case of already present headers") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set in the case of already present headers") } } // SerializeThrift takes a thrift struct and returns the serialized bytes // of that struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func SerializeThrift(ctx context.Context, s athrift.TStruct) ([]byte, error) { var b []byte var buffer = bytes.NewBuffer(b) transport := athrift.NewStreamTransportW(buffer) if err := s.Write(ctx, athrift.NewTBinaryProtocolTransport(transport)); err != nil { return nil, err } if err := transport.Flush(ctx); err != nil { return nil, err } return buffer.Bytes(), nil } // DeserializeThrift takes a byte slice and attempts to write it into the // given thrift struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func DeserializeThrift(ctx context.Context, b []byte, s athrift.TStruct) error { reader := bytes.NewReader(b) transport := athrift.NewStreamTransportR(reader) return s.Read(ctx, athrift.NewTBinaryProtocolTransport(transport)) }
{ s.channel.Close() s.peer.Close() }
identifier_body
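The MockSender wired up in SetupSuite above uses testify's mock package: expectations are registered with On(...).Return(...) and replayed through Called. Below is a minimal, self-contained sketch of that pattern; the Sender interface here is a hypothetical stand-in, not ringpop's actual interface.

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Sender is a hypothetical, minimal version of the interface the
// forwarder depends on.
type Sender interface {
	Lookup(key string) (string, error)
}

// MockSender embeds mock.Mock; each method forwards its arguments to
// Called and unpacks the canned return values.
type MockSender struct {
	mock.Mock
}

func (m *MockSender) Lookup(key string) (string, error) {
	args := m.Called(key)
	return args.String(0), args.Error(1)
}

func main() {
	s := &MockSender{}
	s.On("Lookup", "reachable").Return("127.0.0.1:4040", nil)

	dest, err := s.Lookup("reachable")
	fmt.Println(dest, err) // 127.0.0.1:4040 <nil>
}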
forwarder_test.go
// Copyright (c) 2015 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package forward import ( "bytes" "context" json2 "encoding/json" "errors" "sync" "testing" "time" events "github.com/temporalio/ringpop-go/events/test/mocks" "github.com/temporalio/ringpop-go/test/thrift/pingpong" athrift "github.com/apache/thrift/lib/go/thrift" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/temporalio/tchannel-go" "github.com/temporalio/tchannel-go/json" "github.com/temporalio/tchannel-go/thrift" ) type ContextKey string type ForwarderTestSuite struct { suite.Suite sender *MockSender forwarder *Forwarder channel *tchannel.Channel peer *tchannel.Channel } type Ping struct { Message string `json:"message"` } func (p Ping) Bytes() []byte { data, _ := json2.Marshal(p) return data } type Pong struct { Message string `json:"message"` From string `json:"from"` Headers map[string]string } func (s *ForwarderTestSuite) registerPong(address string, channel *tchannel.Channel) { hmap := map[string]interface{}{ "/ping": func(ctx json.Context, ping *Ping) (*Pong, error) { return &Pong{"Hello, world!", address, ctx.Headers()}, nil }, "/error": func(ctx json.Context, ping *Ping) (*Pong, error) { return nil, errors.New("remote error") }, } s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {})) thriftHandler := &pingpong.MockTChanPingPong{} // successful request with context thriftHandler.On("Ping", mock.MatchedBy( func(c thrift.Context) bool { return true }), &pingpong.Ping{ Key: "ctxTest", }).Return(&pingpong.Pong{ Source: address, }, nil) // successful request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "success", }).Return(&pingpong.Pong{ Source: address, }, nil) // error request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "error", }).Return(nil, &pingpong.PingError{}) server := thrift.NewServer(channel) server.Register(pingpong.NewTChanPingPongServer(thriftHandler)) } func (s *ForwarderTestSuite) SetupSuite() { channel, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.channel = channel peer, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.registerPong("correct pinging host", peer) s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen") sender := &MockSender{} sender.On("Lookup", "me").Return("192.0.2.1:1", 
nil) sender.On("WhoAmI").Return("192.0.2.1:1", nil) // Processes cannot listen on port 0, so it is safe to assume that dialing this address fails immediately, preventing the timeout path from kicking in. sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil) sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil) sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil) sender.On("Lookup", "error").Return("", errors.New("lookup error")) s.sender = sender s.peer = peer s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder")) } func (s *ForwarderTestSuite) TearDownSuite() { s.channel.Close() s.peer.Close() } func (s *ForwarderTestSuite) TestForwardJSON() { var ping Ping var pong Pong dest, err := s.sender.Lookup("reachable") s.NoError(err) headerBytes := []byte(`{"hdr1": "val1"}`) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{Headers: headerBytes}) s.NoError(err, "expected request to be forwarded") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers) } func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"}, tchannel.JSON, nil) s.EqualError(err, "remote error") } func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{ 100 * time.Millisecond, }, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestForwardThrift() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "success", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "ctxTest", }, } bytes1, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") k := ContextKey("key") ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val")) res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, &Options{ Ctx: ctx, }) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "error", }, } bytes, err :=
SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.NotNil(response.PingError, "expected a pingerror") } func (s *ForwarderTestSuite) TestMaxRetries() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestLookupErrorInRetry() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) // lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestKeysDiverged() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) // no keys should result in a destinations length of 0 during retry, causing the request to be aborted _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestRequestTimesOut() { var ping Ping dest, err := s.sender.Lookup("unreachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{ // By providing a negative timeout the context will immediately return with // a DeadlineExceeded error Timeout: -1, }) s.EqualError(err, "request timed out") } func (s *ForwarderTestSuite) TestRequestRerouted() { var ping Ping var pong Pong dest, err := s.sender.Lookup("immediate fail") s.NoError(err) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RerouteRetries: true, RetrySchedule: []time.Duration{time.Millisecond}, }) s.NoError(err, "expected request to be rerouted") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) } func (s *ForwarderTestSuite) TestRequestNoReroutes() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestInvalidInflightDecrement() { var wg sync.WaitGroup wg.Add(1) listener := &events.EventListener{} listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) { wg.Done() }).Return() s.forwarder.inflight = 0 s.forwarder.AddListener(listener) defer s.forwarder.RemoveListener(listener) s.forwarder.decrementInflight() s.Assertions.Equal(int64(0), s.forwarder.inflight,
"Expected inflight to stay at 0 when decremented at 0") // wait for HandleEvent with forward.InflightRequestsMiscountEvent being called wg.Wait() } func TestForwarderTestSuite(t *testing.T) { suite.Run(t, new(ForwarderTestSuite)) } func TestSetForwardedHeader(t *testing.T) { // empty keys array test ctx, _ := thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, nil) assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer") // preserve existing headers ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) ctx = SetForwardedHeader(ctx, []string{"foo"}) assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding") assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist") // multiple keys encoded in the header ctx, _ = thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, []string{"key1", "key2"}) assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded") } func TestDeleteForwardedHeader(t *testing.T) { ctx, _ := thrift.NewContext(0 * time.Second) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set") } ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set in the case of alread present headers") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set in the case of alread present headers") } } // SerializeThrift takes a thrift struct and returns the serialized bytes // of that struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destinaiton. func SerializeThrift(ctx context.Context, s athrift.TStruct) ([]byte, error) { var b []byte var buffer = bytes.NewBuffer(b) transport := athrift.NewStreamTransportW(buffer) if err := s.Write(ctx, athrift.NewTBinaryProtocolTransport(transport)); err != nil { return nil, err } if err := transport.Flush(ctx); err != nil { return nil, err } return buffer.Bytes(), nil } // DeserializeThrift takes a byte slice and attempts to write it into the // given thrift struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destinaiton. func
(ctx context.Context, b []byte, s athrift.TStruct) error { reader := bytes.NewReader(b) transport := athrift.NewStreamTransportR(reader) return s.Read(ctx, athrift.NewTBinaryProtocolTransport(transport)) }
DeserializeThrift
identifier_name
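The SerializeThrift/DeserializeThrift pair in the record above are inverses over the thrift binary protocol. A minimal round-trip sketch, assuming those helpers and the generated pingpong types shown in this record (the example function itself is not part of the original file):

package forward

import (
    "context"
    "fmt"

    "github.com/temporalio/ringpop-go/test/thrift/pingpong"
)

// ExampleThriftRoundTrip serializes a request and decodes it back,
// demonstrating that the two helpers are inverses of each other.
func ExampleThriftRoundTrip() {
    req := &pingpong.PingPongPingArgs{Request: &pingpong.Ping{Key: "success"}}
    raw, err := SerializeThrift(context.Background(), req)
    if err != nil {
        panic(err)
    }
    var decoded pingpong.PingPongPingArgs
    if err := DeserializeThrift(context.Background(), raw, &decoded); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Request.Key) // prints "success"
}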
forwarder_test.go
// Copyright (c) 2015 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package forward import ( "bytes" "context" json2 "encoding/json" "errors" "sync" "testing" "time" events "github.com/temporalio/ringpop-go/events/test/mocks" "github.com/temporalio/ringpop-go/test/thrift/pingpong" athrift "github.com/apache/thrift/lib/go/thrift" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/temporalio/tchannel-go" "github.com/temporalio/tchannel-go/json" "github.com/temporalio/tchannel-go/thrift" ) type ContextKey string type ForwarderTestSuite struct { suite.Suite sender *MockSender forwarder *Forwarder channel *tchannel.Channel peer *tchannel.Channel } type Ping struct { Message string `json:"message"` } func (p Ping) Bytes() []byte { data, _ := json2.Marshal(p) return data } type Pong struct { Message string `json:"message"` From string `json:"from"` Headers map[string]string } func (s *ForwarderTestSuite) registerPong(address string, channel *tchannel.Channel) { hmap := map[string]interface{}{ "/ping": func(ctx json.Context, ping *Ping) (*Pong, error) { return &Pong{"Hello, world!", address, ctx.Headers()}, nil }, "/error": func(ctx json.Context, ping *Ping) (*Pong, error) { return nil, errors.New("remote error") }, } s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {})) thriftHandler := &pingpong.MockTChanPingPong{} // successful request with context thriftHandler.On("Ping", mock.MatchedBy( func(c thrift.Context) bool { return true }), &pingpong.Ping{ Key: "ctxTest", }).Return(&pingpong.Pong{ Source: address, }, nil) // successful request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "success", }).Return(&pingpong.Pong{ Source: address, }, nil) // error request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "error", }).Return(nil, &pingpong.PingError{}) server := thrift.NewServer(channel) server.Register(pingpong.NewTChanPingPongServer(thriftHandler)) } func (s *ForwarderTestSuite) SetupSuite() { channel, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.channel = channel peer, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.registerPong("correct pinging host", peer) s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen") sender := &MockSender{} sender.On("Lookup", "me").Return("192.0.2.1:1", 
nil) sender.On("WhoAmI").Return("192.0.2.1:1", nil) // processes can not listen on port 0 so it is safe to assume that this address is failing immediatly, preventing the timeout path to kick in. sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil) sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil) sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil) sender.On("Lookup", "error").Return("", errors.New("lookup error")) s.sender = sender s.peer = peer s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder")) } func (s *ForwarderTestSuite) TearDownSuite() { s.channel.Close() s.peer.Close() } func (s *ForwarderTestSuite) TestForwardJSON() { var ping Ping var pong Pong dest, err := s.sender.Lookup("reachable") s.NoError(err) headerBytes := []byte(`{"hdr1": "val1"}`) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{Headers: headerBytes}) s.NoError(err, "expected request to be forwarded") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers) } func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"}, tchannel.JSON, nil) s.EqualError(err, "remote error") } func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{ 100 * time.Millisecond, }, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestForwardThrift() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "success", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "ctxTest", }, } bytes1, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") k := ContextKey("key") ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val")) res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, &Options{ Ctx: ctx, }) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() { dest, err := s.sender.Lookup("reachable") s.NoError(err)
Key: "error", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.NotNil(response.PingError, "expected a pingerror") } func (s *ForwarderTestSuite) TestMaxRetries() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestLookupErrorInRetry() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) // lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestKeysDiverged() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) // no keys should result in destinations length of 0 during retry, causing abortion of request _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestRequestTimesOut() { var ping Ping dest, err := s.sender.Lookup("unreachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{ // By providing a negative timeout the context will directly return with // a DeadlineExceeded error Timeout: -1, }) s.EqualError(err, "request timed out") } func (s *ForwarderTestSuite) TestRequestRerouted() { var ping Ping var pong Pong dest, err := s.sender.Lookup("immediate fail") s.NoError(err) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RerouteRetries: true, RetrySchedule: []time.Duration{time.Millisecond}, }) s.NoError(err, "expected request to be rerouted") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) } func (s *ForwarderTestSuite) TestRequestNoReroutes() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestInvalidInflightDecrement() { var wg sync.WaitGroup wg.Add(1) listener := &events.EventListener{} listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) { wg.Done() }).Return() s.forwarder.inflight = 0 s.forwarder.AddListener(listener) defer s.forwarder.RemoveListener(listener) s.forwarder.decrementInflight() 
s.Assertions.Equal(int64(0), s.forwarder.inflight, "Expected inflight to stay at 0 when decremented at 0") // wait for HandleEvent with forward.InflightRequestsMiscountEvent being called wg.Wait() } func TestForwarderTestSuite(t *testing.T) { suite.Run(t, new(ForwarderTestSuite)) } func TestSetForwardedHeader(t *testing.T) { // empty keys array test ctx, _ := thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, nil) assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer") // preserve existing headers ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) ctx = SetForwardedHeader(ctx, []string{"foo"}) assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding") assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist") // multiple keys encoded in the header ctx, _ = thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, []string{"key1", "key2"}) assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded") } func TestDeleteForwardedHeader(t *testing.T) { ctx, _ := thrift.NewContext(0 * time.Second) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set") } ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set in the case of already present headers") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set in the case of already present headers") } } // SerializeThrift takes a thrift struct and returns the serialized bytes // of that struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func SerializeThrift(ctx context.Context, s athrift.TStruct) ([]byte, error) { var b []byte var buffer = bytes.NewBuffer(b) transport := athrift.NewStreamTransportW(buffer) if err := s.Write(ctx, athrift.NewTBinaryProtocolTransport(transport)); err != nil { return nil, err } if err := transport.Flush(ctx); err != nil { return nil, err } return buffer.Bytes(), nil } // DeserializeThrift takes a byte slice and attempts to write it into the // given thrift struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func DeserializeThrift(ctx context.Context, b []byte, s athrift.TStruct) error { reader := bytes.NewReader(b) transport := athrift.NewStreamTransportR(reader) return s.Read(ctx, athrift.NewTBinaryProtocolTransport(transport)) }
request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{
random_line_split
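The tests in this record exercise ForwardRequest's retry knobs (MaxRetries, RetrySchedule, RerouteRetries). A hedged sketch of a call wiring them together, with Options field names exactly as they appear in the tests; the wrapper function itself is illustrative only:

package forward

import (
    "time"

    "github.com/temporalio/tchannel-go"
)

// forwardWithRetries forwards a JSON payload and retries twice at
// millisecond intervals, re-resolving the destination between attempts.
func forwardWithRetries(fwd *Forwarder, payload []byte, dest string) ([]byte, error) {
    return fwd.ForwardRequest(payload, dest, "test", "/ping",
        []string{"reachable"}, tchannel.JSON, &Options{
            MaxRetries:     2,                                                    // after this, "max retries exceeded"
            RetrySchedule:  []time.Duration{time.Millisecond, time.Millisecond}, // delay before each retry
            RerouteRetries: true,                                                 // follow a freshly looked-up destination
        })
}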
forwarder_test.go
// Copyright (c) 2015 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package forward import ( "bytes" "context" json2 "encoding/json" "errors" "sync" "testing" "time" events "github.com/temporalio/ringpop-go/events/test/mocks" "github.com/temporalio/ringpop-go/test/thrift/pingpong" athrift "github.com/apache/thrift/lib/go/thrift" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" "github.com/temporalio/tchannel-go" "github.com/temporalio/tchannel-go/json" "github.com/temporalio/tchannel-go/thrift" ) type ContextKey string type ForwarderTestSuite struct { suite.Suite sender *MockSender forwarder *Forwarder channel *tchannel.Channel peer *tchannel.Channel } type Ping struct { Message string `json:"message"` } func (p Ping) Bytes() []byte { data, _ := json2.Marshal(p) return data } type Pong struct { Message string `json:"message"` From string `json:"from"` Headers map[string]string } func (s *ForwarderTestSuite) registerPong(address string, channel *tchannel.Channel) { hmap := map[string]interface{}{ "/ping": func(ctx json.Context, ping *Ping) (*Pong, error) { return &Pong{"Hello, world!", address, ctx.Headers()}, nil }, "/error": func(ctx json.Context, ping *Ping) (*Pong, error) { return nil, errors.New("remote error") }, } s.Require().NoError(json.Register(channel, hmap, func(ctx context.Context, err error) {})) thriftHandler := &pingpong.MockTChanPingPong{} // successful request with context thriftHandler.On("Ping", mock.MatchedBy( func(c thrift.Context) bool { return true }), &pingpong.Ping{ Key: "ctxTest", }).Return(&pingpong.Pong{ Source: address, }, nil) // successful request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "success", }).Return(&pingpong.Pong{ Source: address, }, nil) // error request thriftHandler.On("Ping", mock.Anything, &pingpong.Ping{ Key: "error", }).Return(nil, &pingpong.PingError{}) server := thrift.NewServer(channel) server.Register(pingpong.NewTChanPingPongServer(thriftHandler)) } func (s *ForwarderTestSuite) SetupSuite() { channel, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.channel = channel peer, err := tchannel.NewChannel("test", nil) s.Require().NoError(err, "channel must be created successfully") s.registerPong("correct pinging host", peer) s.Require().NoError(peer.ListenAndServe("127.0.0.1:0"), "channel must listen") sender := &MockSender{} sender.On("Lookup", "me").Return("192.0.2.1:1", 
nil) sender.On("WhoAmI").Return("192.0.2.1:1", nil) // processes can not listen on port 0 so it is safe to assume that this address is failing immediatly, preventing the timeout path to kick in. sender.On("Lookup", "immediate fail").Return("127.0.0.1:0", nil) sender.On("Lookup", "reachable").Return(peer.PeerInfo().HostPort, nil) sender.On("Lookup", "unreachable").Return("192.0.2.128:1", nil) sender.On("Lookup", "error").Return("", errors.New("lookup error")) s.sender = sender s.peer = peer s.forwarder = NewForwarder(s.sender, s.channel.GetSubChannel("forwarder")) } func (s *ForwarderTestSuite) TearDownSuite() { s.channel.Close() s.peer.Close() } func (s *ForwarderTestSuite) TestForwardJSON() { var ping Ping var pong Pong dest, err := s.sender.Lookup("reachable") s.NoError(err) headerBytes := []byte(`{"hdr1": "val1"}`) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{Headers: headerBytes}) s.NoError(err, "expected request to be forwarded") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) s.Equal(map[string]string{"hdr1": "val1"}, pong.Headers) } func (s *ForwarderTestSuite) TestForwardJSONErrorResponse() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/error", []string{"reachable"}, tchannel.JSON, nil) s.EqualError(err, "remote error") } func (s *ForwarderTestSuite) TestForwardJSONInvalidEndpoint() { var ping Ping dest, err := s.sender.Lookup("reachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/invalid", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{ 100 * time.Millisecond, }, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestForwardThrift() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "success", }, } bytes, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftWithCtxOption() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "ctxTest", }, } bytes1, err := SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") k := ContextKey("key") ctx := thrift.Wrap(context.WithValue(context.Background(), k, "val")) res, err := s.forwarder.ForwardRequest(bytes1, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, &Options{ Ctx: ctx, }) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.Equal("correct pinging host", response.Success.Source) } func (s *ForwarderTestSuite) TestForwardThriftErrorResponse() { dest, err := s.sender.Lookup("reachable") s.NoError(err) request := &pingpong.PingPongPingArgs{ Request: &pingpong.Ping{ Key: "error", }, } bytes, err := 
SerializeThrift(context.Background(), request) s.NoError(err, "expected ping to be serialized") res, err := s.forwarder.ForwardRequest(bytes, dest, "test", "PingPong::Ping", []string{"reachable"}, tchannel.Thrift, nil) s.NoError(err, "expected request to be forwarded") var response pingpong.PingPongPingResult err = DeserializeThrift(context.Background(), res, &response) s.NoError(err) s.NotNil(response.PingError, "expected a pingerror") } func (s *ForwarderTestSuite) TestMaxRetries() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"immediate fail"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestLookupErrorInRetry() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"error"}, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) // lookup errors are swallowed and result in the key missing in the dests list, so a diverged error is expected s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestKeysDiverged() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) // no keys should result in destinations length of 0 during retry, causing abortion of request _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", nil, tchannel.JSON, &Options{ MaxRetries: 2, RetrySchedule: []time.Duration{time.Millisecond, time.Millisecond}, }) s.EqualError(err, "key destinations have diverged") } func (s *ForwarderTestSuite) TestRequestTimesOut() { var ping Ping dest, err := s.sender.Lookup("unreachable") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"unreachable"}, tchannel.JSON, &Options{ // By providing a negative timeout the context will directly return with // a DeadlineExceeded error Timeout: -1, }) s.EqualError(err, "request timed out") } func (s *ForwarderTestSuite) TestRequestRerouted() { var ping Ping var pong Pong dest, err := s.sender.Lookup("immediate fail") s.NoError(err) res, err := s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RerouteRetries: true, RetrySchedule: []time.Duration{time.Millisecond}, }) s.NoError(err, "expected request to be rerouted") s.NoError(json2.Unmarshal(res, &pong)) s.Equal("correct pinging host", pong.From) s.Equal("Hello, world!", pong.Message) } func (s *ForwarderTestSuite) TestRequestNoReroutes() { var ping Ping dest, err := s.sender.Lookup("immediate fail") s.NoError(err) _, err = s.forwarder.ForwardRequest(ping.Bytes(), dest, "test", "/ping", []string{"reachable"}, tchannel.JSON, &Options{ MaxRetries: 1, RetrySchedule: []time.Duration{time.Millisecond}, }) s.EqualError(err, "max retries exceeded") } func (s *ForwarderTestSuite) TestInvalidInflightDecrement() { var wg sync.WaitGroup wg.Add(1) listener := &events.EventListener{} listener.On("HandleEvent", mock.AnythingOfTypeArgument("forward.InflightRequestsMiscountEvent")).Run(func(args mock.Arguments) { wg.Done() }).Return() s.forwarder.inflight = 0 s.forwarder.AddListener(listener) defer s.forwarder.RemoveListener(listener) s.forwarder.decrementInflight() s.Assertions.Equal(int64(0), s.forwarder.inflight, 
"Expected inflight to stay at 0 when decremented at 0") // wait for HandleEvent with forward.InflightRequestsMiscountEvent being called wg.Wait() } func TestForwarderTestSuite(t *testing.T) { suite.Run(t, new(ForwarderTestSuite)) } func TestSetForwardedHeader(t *testing.T) { // empty keys array test ctx, _ := thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, nil) assert.Equal(t, "[]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set and be an empty array instead of null for the nil pointer") // preserve existing headers ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) ctx = SetForwardedHeader(ctx, []string{"foo"}) assert.Equal(t, "[\"foo\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set to a serialized array of keys used in forwarding") assert.Equal(t, "this key", ctx.Headers()["keep"], "expected the header set before the forwarding header to still exist") // multiple keys encoded in the header ctx, _ = thrift.NewContext(0 * time.Second) ctx = SetForwardedHeader(ctx, []string{"key1", "key2"}) assert.Equal(t, "[\"key1\",\"key2\"]", ctx.Headers()[ForwardedHeaderName], "expected the forwarding header to be set with both keys encoded") } func TestDeleteForwardedHeader(t *testing.T) { ctx, _ := thrift.NewContext(0 * time.Second) if DeleteForwardedHeader(ctx)
ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set") } ctx, _ = thrift.NewContext(0 * time.Second) ctx = thrift.WithHeaders(ctx, map[string]string{ "keep": "this key", }) if DeleteForwardedHeader(ctx) { t.Errorf("ringpop claimed that the forwarded header was set before it was set in the case of already present headers") } ctx = SetForwardedHeader(ctx, nil) if !DeleteForwardedHeader(ctx) { t.Errorf("ringpop was not able to identify that the forwarded header was set in the case of already present headers") } } // SerializeThrift takes a thrift struct and returns the serialized bytes // of that struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func SerializeThrift(ctx context.Context, s athrift.TStruct) ([]byte, error) { var b []byte var buffer = bytes.NewBuffer(b) transport := athrift.NewStreamTransportW(buffer) if err := s.Write(ctx, athrift.NewTBinaryProtocolTransport(transport)); err != nil { return nil, err } if err := transport.Flush(ctx); err != nil { return nil, err } return buffer.Bytes(), nil } // DeserializeThrift takes a byte slice and attempts to write it into the // given thrift struct using the thrift binary protocol. This is a temporary // measure before frames can be forwarded directly past the endpoint to the proper // destination. func DeserializeThrift(ctx context.Context, b []byte, s athrift.TStruct) error { reader := bytes.NewReader(b) transport := athrift.NewStreamTransportR(reader) return s.Read(ctx, athrift.NewTBinaryProtocolTransport(transport)) }
{ t.Errorf("ringpop claimed that the forwarded header was set before it was set") }
conditional_block
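As the tests in this record show, SetForwardedHeader stores a JSON-encoded key array under ForwardedHeaderName, and DeleteForwardedHeader removes it while reporting whether it was present. A small usage sketch, assuming the package-level helpers from this record (the demo function itself is hypothetical):

package forward

import (
    "fmt"
    "time"

    "github.com/temporalio/tchannel-go/thrift"
)

// forwardedHeaderDemo sets, reads, and then deletes the forwarded header.
func forwardedHeaderDemo() {
    ctx, _ := thrift.NewContext(time.Second)
    ctx = SetForwardedHeader(ctx, []string{"key1", "key2"})
    fmt.Println(ctx.Headers()[ForwardedHeaderName]) // ["key1","key2"]
    fmt.Println(DeleteForwardedHeader(ctx))         // true: the header was present and is now removed
}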
myrule2.js
var firstContent; var firstLink; var nowOffset; var articleLinkArr = []; var flag = true; var http = require('http'); //var MongoClient = require('mongodb').MongoClient; //var DB_CONN_STR = 'mongodb://localhost:27017/alex'; // database is runoob // get the current time function getNowFormatDate() { var date = new Date(); var seperator1 = "-"; var seperator2 = ":"; var month = date.getMonth() + 1; var strDate = date.getDate(); if (month >= 1 && month <= 9) { month = "0" + month; } if (strDate >= 0 && strDate <= 9) { strDate = "0" + strDate; } var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate + " " + date.getHours() + seperator2 + date.getMinutes() + seperator2 + date.getSeconds(); return currentdate; } var insertData = function(db, callback) { // connect to the 'site' collection var collection = db.collection('site'); // insert the data data = articleLinkArr; collection.insert(data, function(err, result) { if(err) { console.log('Error:'+ err); return; } callback(result); }); }; module.exports = { token: Date.now(), summary: "HIT (Weihai) WeChat official-account crawler based on a man-in-the-middle attack", getNextChunk: function (url, delay, nonce) { if (nonce) { var next = '<script nonce="' + nonce + '" type="text/javascript">'; } else { var next = '<script type="text/javascript">'; } next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');'; next += 'setTimeout(function(){window.location.href="' + url + '";},10000);'; next += '</script>'; return next; }, getNotification: function () { return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' + 'HIT (Weihai) notice: if the page does not refresh automatically within 10 seconds, please refresh manually</h1>'; }, getNextUrl: function (currentUrl, rawList, appmsg_token) { console.log("start capturing the next page of message history......"); if (!rawList) { return ''; } var currentUrlArr = currentUrl.split("&"); var nextHistoryPageArr = []; for(var item in currentUrlArr){ //console.log(currentUrlArr[item]); if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"|| currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){ nextHistoryPageArr.push(currentUrlArr[item]); } } nextHistoryPageUrl = nextHistoryPageArr.join('&'); nextHistoryPageUrl += "&f=json"; // the offset is rewritten automatically in the main handler; no need to change it here nextHistoryPageUrl += "&offset=10"; nextHistoryPageUrl += "&count=10&is_ok=1"; nextHistoryPageUrl += "&uin=777&key=777"; nextHistoryPageUrl += "&wxtoken="; nextHistoryPageUrl += "&appmsg_token="; nextHistoryPageUrl += appmsg_token; nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl; nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg"); console.log("this is the raw next-page url:"); console.log(nextHistoryPageUrl); firstLink = nextHistoryPageUrl; return nextHistoryPageUrl; }, replaceResponseHeader: function(req,res,header){ header = header || {}; console.log("start: rewriting response header from json to html"); if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) { console.log("rewriting content-type for the getmsg response"); header['content-type'] = "text/html; charset=UTF-8"; } console.log("done: response header rewritten from json to html"); return header; }, // entry point replaceServerResDataAsync: function (req, res, serverResData, callback) { console.log("captured a packet..."); // console.log(articleLinkArr.size); if(/mp\/profile_ext\?action=home/i.test(req.url)){ try{ var historyHomePage = /var msgList = \'(.*?)\';/; var historyHomePageList = historyHomePage.exec(serverResData.toString()); if(!historyHomePageList){ callback(serverResData); console.log("captured an empty packet!!"); return; } historyHomePageList[1] = historyHomePageList[1].replace(/&quot;/g, "'"); var historyHomePageObj = 
eval("("+historyHomePageList[1]+")");
// Problem: the capture shows 8 article summary entries, but only 4 made it into historyHomePageList -- is the regex match at fault?? // Solution: one entry in the middle of the list had historyHomePageObj['list'][item]["app_msg_ext_info"] undefined, and the resulting exception blocked the // remaining summary entries from being collected! for(var item in historyHomePageObj['list']){ console.log(item); if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){ continue; } console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]); var title = historyHomePageObj.list[item].app_msg_ext_info.title; var author = historyHomePageObj.list[item].app_msg_ext_info.author; var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]; var datetime = historyHomePageObj.list[item].comm_msg_info.datetime; var id = historyHomePageObj.list[item].comm_msg_info.id; console.log(title); // official-account name var nickname_pattern = /var nickname = \"(.*?)\"/; var nickname = nickname_pattern.exec(serverResData.toString())[1]; console.log("the official account's name is --------", nickname); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; // articleLinkArr.push(nickname); articleLinkArr.push(articleJson); } // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback below) /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/; var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1]; var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token); firstContent = serverResData; // inject the js that jumps to the next history page var next = this.getNextChunk(nextHistoryPageUrl, 6000); var note = this.getNotification(); serverResData = note + serverResData + next; nowOffset = 0; console.log("successfully fetched the first page of message history __end"); callback(serverResData); } catch (e){ callback(serverResData); } } else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){ try { if(!serverResData){ console.log("finished crawling all history articles of this official account!"); return; } nowOffset += 10; firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString()); // inject the js that jumps to the following page var note = this.getNotification(); var next = this.getNextChunk(firstLink, 6000); var newContent = note + firstContent + next; var newData = serverResData; var ResDataobj = JSON.parse(newData.toString()); var general_msg_list = ResDataobj['general_msg_list']; var listJson = JSON.parse(general_msg_list); for(var artileIndex in listJson.list){ try { var title = listJson.list[artileIndex].app_msg_ext_info.title; var author = listJson.list[artileIndex].app_msg_ext_info.author; var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url; var datetime = listJson.list[artileIndex].comm_msg_info.datetime; var id = listJson.list[artileIndex].comm_msg_info.id; console.log(title); console.log(content_url); console.log(id); console.log(datetime); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; console.log("__________", articleLinkArr); articleLinkArr.push(articleJson); console.log("__________", articleLinkArr); } catch (e){ console.log(listJson.list[artileIndex]); console.log("error while reading a property! probably a short message, not a history article", 'red'); } } /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback above) console.log("successfully saved the next page of message history (original json)"); callback(newContent); } catch (e){ console.log("unexpected error while handling the getmsg response (2)"); callback(serverResData); } } else{ callback(serverResData); } } };
random_line_split
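The rule above pulls title/author/content_url/datetime/id out of each general_msg_list entry, skipping entries whose app_msg_ext_info is undefined. A hedged mirror of that decode step in Go, the language used elsewhere in this dump (the struct names and tags are illustrative, derived from the JS field names; this is not part of any existing package):

package main

import "encoding/json"

type appMsgExtInfo struct {
    Title      string `json:"title"`
    Author     string `json:"author"`
    ContentURL string `json:"content_url"`
}

type commMsgInfo struct {
    ID       int64 `json:"id"`
    Datetime int64 `json:"datetime"`
}

type msgEntry struct {
    AppMsgExtInfo *appMsgExtInfo `json:"app_msg_ext_info"` // nil when the JS sees undefined
    CommMsgInfo   commMsgInfo    `json:"comm_msg_info"`
}

type msgList struct {
    List []msgEntry `json:"list"`
}

// parseMsgList decodes the captured blob and drops entries without
// app_msg_ext_info, mirroring the undefined-check in the JS loop.
func parseMsgList(raw []byte) ([]msgEntry, error) {
    var m msgList
    if err := json.Unmarshal(raw, &m); err != nil {
        return nil, err
    }
    entries := make([]msgEntry, 0, len(m.List))
    for _, e := range m.List {
        if e.AppMsgExtInfo != nil {
            entries = append(entries, e)
        }
    }
    return entries, nil
}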
myrule2.js
var firstContent; var firstLink; var nowOffset; var articleLinkArr = []; var flag = true; var http = require('http'); //var MongoClient = require('mongodb').MongoClient; //var DB_CONN_STR = 'mongodb://localhost:27017/alex'; // database is runoob // get the current time function getNowFormatDate() { var date = new Date(); var seperator1 = "-"; var seperator2 = ":"; var month = date.getMonth() + 1; var strDate = date.getDate(); if (month >= 1 && month <= 9) { month = "0" + month; } if (strDate >= 0 && strDate <= 9) { strDate = "0" + strDate; } var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate + " " + date.getHours() + seperator2 + date.getMinutes() + seperator2 + date.getSeconds(); return currentdate; } var insertData = function(db, callback) { // connect to the 'site' collection var collection = db.collection('site'); // insert the data data = articleLinkArr; collection.insert(data, function(err, result) { if(err) { console.log('Error:'+ err); return; } callback(result); }); }; module.exports = { token: Date.now(), summary: "HIT (Weihai) WeChat official-account crawler based on a man-in-the-middle attack", getNextChunk: function (url, delay, nonce) { if (nonce) { var next = '<script nonce="' + nonce + '" type="text/javas
} next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');'; next += 'setTimeout(function(){window.location.href="' + url + '";},10000);'; next += '</script>'; return next; }, getNotification: function () { return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' + 'HIT (Weihai) notice: if the page does not refresh automatically within 10 seconds, please refresh manually</h1>'; }, getNextUrl: function (currentUrl, rawList, appmsg_token) { console.log("start capturing the next page of message history......"); if (!rawList) { return ''; } var currentUrlArr = currentUrl.split("&"); var nextHistoryPageArr = []; for(var item in currentUrlArr){ //console.log(currentUrlArr[item]); if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"|| currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){ nextHistoryPageArr.push(currentUrlArr[item]); } } nextHistoryPageUrl = nextHistoryPageArr.join('&'); nextHistoryPageUrl += "&f=json"; // the offset is rewritten automatically in the main handler; no need to change it here nextHistoryPageUrl += "&offset=10"; nextHistoryPageUrl += "&count=10&is_ok=1"; nextHistoryPageUrl += "&uin=777&key=777"; nextHistoryPageUrl += "&wxtoken="; nextHistoryPageUrl += "&appmsg_token="; nextHistoryPageUrl += appmsg_token; nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl; nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg"); console.log("this is the raw next-page url:"); console.log(nextHistoryPageUrl); firstLink = nextHistoryPageUrl; return nextHistoryPageUrl; }, replaceResponseHeader: function(req,res,header){ header = header || {}; console.log("start: rewriting response header from json to html"); if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) { console.log("rewriting content-type for the getmsg response"); header['content-type'] = "text/html; charset=UTF-8"; } console.log("done: response header rewritten from json to html"); return header; }, // entry point replaceServerResDataAsync: function (req, res, serverResData, callback) { console.log("captured a packet..."); // console.log(articleLinkArr.size); if(/mp\/profile_ext\?action=home/i.test(req.url)){ try{ var historyHomePage = /var msgList = \'(.*?)\';/; var historyHomePageList = historyHomePage.exec(serverResData.toString()); if(!historyHomePageList){ callback(serverResData); console.log("captured an empty packet!!"); return; } historyHomePageList[1] = historyHomePageList[1].replace(/&quot;/g, "'"); var historyHomePageObj = eval("("+historyHomePageList[1]+")"); // Problem: the capture shows 8 article summary entries, but only 4 made it into historyHomePageList -- is the regex match at fault?? // Solution: one entry in the middle of the list had historyHomePageObj['list'][item]["app_msg_ext_info"] undefined, and the resulting exception blocked the // remaining summary entries from being collected! 
for(var item in historyHomePageObj['list']){ console.log(item); if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){ continue; } console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]); var title = historyHomePageObj.list[item].app_msg_ext_info.title; var author = historyHomePageObj.list[item].app_msg_ext_info.author; var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]; var datetime = historyHomePageObj.list[item].comm_msg_info.datetime; var id = historyHomePageObj.list[item].comm_msg_info.id; console.log(title); // official-account name var nickname_pattern = /var nickname = \"(.*?)\"/; var nickname = nickname_pattern.exec(serverResData.toString())[1]; console.log("the official account's name is --------", nickname); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; // articleLinkArr.push(nickname); articleLinkArr.push(articleJson); } // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback below) /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/; var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1]; var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token); firstContent = serverResData; // inject the js that jumps to the next history page var next = this.getNextChunk(nextHistoryPageUrl, 6000); var note = this.getNotification(); serverResData = note + serverResData + next; nowOffset = 0; console.log("successfully fetched the first page of message history __end"); callback(serverResData); } catch (e){ callback(serverResData); } } else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){ try { if(!serverResData){ console.log("finished crawling all history articles of this official account!"); return; } nowOffset += 10; firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString()); // inject the js that jumps to the following page var note = this.getNotification(); var next = this.getNextChunk(firstLink, 6000); var newContent = note + firstContent + next; var newData = serverResData; var ResDataobj = JSON.parse(newData.toString()); var general_msg_list = ResDataobj['general_msg_list']; var listJson = JSON.parse(general_msg_list); for(var artileIndex in listJson.list){ try { var title = listJson.list[artileIndex].app_msg_ext_info.title; var author = listJson.list[artileIndex].app_msg_ext_info.author; var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url; var datetime = listJson.list[artileIndex].comm_msg_info.datetime; var id = listJson.list[artileIndex].comm_msg_info.id; console.log(title); console.log(content_url); console.log(id); console.log(datetime); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; console.log("__________", articleLinkArr); articleLinkArr.push(articleJson); console.log("__________", articleLinkArr); } catch (e){ console.log(listJson.list[artileIndex]); console.log("error while reading a property! probably a short message, not a history article", 'red'); } } /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback above) console.log("successfully saved the next page of message history (original json)"); callback(newContent); } catch (e){ console.log("unexpected error while handling the getmsg response (2)"); callback(serverResData); } } else{ callback(serverResData); } } };
cript">'; } else { var next = '<script type="text/javascript">';
conditional_block
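getNextUrl above keeps the /mp/p, __biz, scene and pass_ query parameters, appends fixed offset/count/uin/key placeholders plus the captured appmsg_token, and rewrites action=home to action=getmsg. The same construction sketched with Go's net/url (illustrative only; "pass_ticket" is an assumption derived from the "pass_" prefix check in the JS):

package main

import (
    "net/url"
    "strconv"
)

// nextHistoryPageURL rebuilds the pagination URL that getNextUrl assembles
// by string concatenation above.
func nextHistoryPageURL(current url.Values, appmsgToken string, offset int) string {
    q := url.Values{}
    for _, k := range []string{"__biz", "scene", "pass_ticket"} {
        if v := current.Get(k); v != "" {
            q.Set(k, v)
        }
    }
    q.Set("f", "json")
    q.Set("offset", strconv.Itoa(offset)) // bumped by 10 for every captured page
    q.Set("count", "10")
    q.Set("is_ok", "1")
    q.Set("uin", "777") // placeholder values, exactly as in the rule above
    q.Set("key", "777")
    q.Set("wxtoken", "")
    q.Set("appmsg_token", appmsgToken)
    return "https://mp.weixin.qq.com/mp/profile_ext?action=getmsg&" + q.Encode()
}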
myrule2.js
var firstContent; var firstLink; var nowOffset; var articleLinkArr = []; var flag = true; var http = require('http'); //var MongoClient = require('mongodb').MongoClient; //var DB_CONN_STR = 'mongodb://localhost:27017/alex'; // database is runoob // get the current time function getNowFormatDate() { var date = new
function(db, callback) { // connect to the 'site' collection var collection = db.collection('site'); // insert the data data = articleLinkArr; collection.insert(data, function(err, result) { if(err) { console.log('Error:'+ err); return; } callback(result); }); }; module.exports = { token: Date.now(), summary: "HIT (Weihai) WeChat official-account crawler based on a man-in-the-middle attack", getNextChunk: function (url, delay, nonce) { if (nonce) { var next = '<script nonce="' + nonce + '" type="text/javascript">'; } else { var next = '<script type="text/javascript">'; } next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');'; next += 'setTimeout(function(){window.location.href="' + url + '";},10000);'; next += '</script>'; return next; }, getNotification: function () { return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' + 'HIT (Weihai) notice: if the page does not refresh automatically within 10 seconds, please refresh manually</h1>'; }, getNextUrl: function (currentUrl, rawList, appmsg_token) { console.log("start capturing the next page of message history......"); if (!rawList) { return ''; } var currentUrlArr = currentUrl.split("&"); var nextHistoryPageArr = []; for(var item in currentUrlArr){ //console.log(currentUrlArr[item]); if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"|| currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){ nextHistoryPageArr.push(currentUrlArr[item]); } } nextHistoryPageUrl = nextHistoryPageArr.join('&'); nextHistoryPageUrl += "&f=json"; // the offset is rewritten automatically in the main handler; no need to change it here nextHistoryPageUrl += "&offset=10"; nextHistoryPageUrl += "&count=10&is_ok=1"; nextHistoryPageUrl += "&uin=777&key=777"; nextHistoryPageUrl += "&wxtoken="; nextHistoryPageUrl += "&appmsg_token="; nextHistoryPageUrl += appmsg_token; nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl; nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg"); console.log("this is the raw next-page url:"); console.log(nextHistoryPageUrl); firstLink = nextHistoryPageUrl; return nextHistoryPageUrl; }, replaceResponseHeader: function(req,res,header){ header = header || {}; console.log("start: rewriting response header from json to html"); if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) { console.log("rewriting content-type for the getmsg response"); header['content-type'] = "text/html; charset=UTF-8"; } console.log("done: response header rewritten from json to html"); return header; }, // entry point replaceServerResDataAsync: function (req, res, serverResData, callback) { console.log("captured a packet..."); // console.log(articleLinkArr.size); if(/mp\/profile_ext\?action=home/i.test(req.url)){ try{ var historyHomePage = /var msgList = \'(.*?)\';/; var historyHomePageList = historyHomePage.exec(serverResData.toString()); if(!historyHomePageList){ callback(serverResData); console.log("captured an empty packet!!"); return; } historyHomePageList[1] = historyHomePageList[1].replace(/&quot;/g, "'"); var historyHomePageObj = eval("("+historyHomePageList[1]+")"); // Problem: the capture shows 8 article summary entries, but only 4 made it into historyHomePageList -- is the regex match at fault?? // Solution: one entry in the middle of the list had historyHomePageObj['list'][item]["app_msg_ext_info"] undefined, and the resulting exception blocked the // remaining summary entries from being collected! 
for(var item in historyHomePageObj['list']){ console.log(item); if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){ continue; } console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]); var title = historyHomePageObj.list[item].app_msg_ext_info.title; var author = historyHomePageObj.list[item].app_msg_ext_info.author; var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]; var datetime = historyHomePageObj.list[item].comm_msg_info.datetime; var id = historyHomePageObj.list[item].comm_msg_info.id; console.log(title); // official-account name var nickname_pattern = /var nickname = \"(.*?)\"/; var nickname = nickname_pattern.exec(serverResData.toString())[1]; console.log("the official account's name is --------", nickname); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; // articleLinkArr.push(nickname); articleLinkArr.push(articleJson); } // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback below) /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/; var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1]; var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token); firstContent = serverResData; // inject the js that jumps to the next history page var next = this.getNextChunk(nextHistoryPageUrl, 6000); var note = this.getNotification(); serverResData = note + serverResData + next; nowOffset = 0; console.log("successfully fetched the first page of message history __end"); callback(serverResData); } catch (e){ callback(serverResData); } } else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){ try { if(!serverResData){ console.log("finished crawling all history articles of this official account!"); return; } nowOffset += 10; firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString()); // inject the js that jumps to the following page var note = this.getNotification(); var next = this.getNextChunk(firstLink, 6000); var newContent = note + firstContent + next; var newData = serverResData; var ResDataobj = JSON.parse(newData.toString()); var general_msg_list = ResDataobj['general_msg_list']; var listJson = JSON.parse(general_msg_list); for(var artileIndex in listJson.list){ try { var title = listJson.list[artileIndex].app_msg_ext_info.title; var author = listJson.list[artileIndex].app_msg_ext_info.author; var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url; var datetime = listJson.list[artileIndex].comm_msg_info.datetime; var id = listJson.list[artileIndex].comm_msg_info.id; console.log(title); console.log(content_url); console.log(id); console.log(datetime); // article metadata from the current history page var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; console.log("__________", articleLinkArr); articleLinkArr.push(articleJson); console.log("__________", articleLinkArr); } catch (e){ console.log(listJson.list[artileIndex]); console.log("error while reading a property! probably a short message, not a history article", 'red'); } } /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ // console.log(result); // NOTE: 'result' is undefined in this scope (leftover from the Mongo callback above) console.log("successfully saved the next page of message history (original json)"); callback(newContent); } catch (e){ console.log("unexpected error while handling the getmsg response (2)"); callback(serverResData); } } else{ callback(serverResData); } } };
Date(); var seperator1 = "-"; var seperator2 = ":"; var month = date.getMonth() + 1; var strDate = date.getDate(); if (month >= 1 && month <= 9) { month = "0" + month; } if (strDate >= 0 && strDate <= 9) { strDate = "0" + strDate; } var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate + " " + date.getHours() + seperator2 + date.getMinutes() + seperator2 + date.getSeconds(); return currentdate; } var insertData =
identifier_body
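getNowFormatDate, whose body is the middle of this record, emits a "yyyy-MM-dd H:m:s" timestamp with zero-padded month and day but unpadded hour, minute, and second. An equivalent sketch in Go, mirroring that padding exactly:

package main

import (
    "fmt"
    "time"
)

// nowFormatDate reproduces the output shape of the JS getNowFormatDate:
// zero-padded month/day, unpadded time components.
func nowFormatDate() string {
    t := time.Now()
    return fmt.Sprintf("%04d-%02d-%02d %d:%d:%d",
        t.Year(), int(t.Month()), t.Day(), t.Hour(), t.Minute(), t.Second())
}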
myrule2.js
var firstContent; var firstLink; var nowOffset; var articleLinkArr = []; var flag = true; var http = require('http'); //var MongoClient = require('mongodb').MongoClient; //var DB_CONN_STR = 'mongodb://localhost:27017/alex'; // database is runoob // get the current time function getNowFormatDate() {
new Date(); var seperator1 = "-"; var seperator2 = ":"; var month = date.getMonth() + 1; var strDate = date.getDate(); if (month >= 1 && month <= 9) { month = "0" + month; } if (strDate >= 0 && strDate <= 9) { strDate = "0" + strDate; } var currentdate = date.getFullYear() + seperator1 + month + seperator1 + strDate + " " + date.getHours() + seperator2 + date.getMinutes() + seperator2 + date.getSeconds(); return currentdate; } var insertData = function(db, callback) { // connect to the 'site' collection var collection = db.collection('site'); // insert the data data = articleLinkArr; collection.insert(data, function(err, result) { if(err) { console.log('Error:'+ err); return; } callback(result); }); }; module.exports = { token: Date.now(), summary: "HIT (Weihai) WeChat official-account crawler based on a man-in-the-middle attack", getNextChunk: function (url, delay, nonce) { if (nonce) { var next = '<script nonce="' + nonce + '" type="text/javascript">'; } else { var next = '<script type="text/javascript">'; } next += 'setTimeout(function(){window.location.href="' + url + '";},' + delay + ');'; next += 'setTimeout(function(){window.location.href="' + url + '";},10000);'; next += '</script>'; return next; }, getNotification: function () { return '<h1 style="color:red; font-size:20px; text-align: center; margin-top: 10px; margin-bottom: 10px;">' + 'HIT (Weihai) notice: if the page does not refresh automatically within 10 seconds, please refresh manually</h1>'; }, getNextUrl: function (currentUrl, rawList, appmsg_token) { console.log("start capturing the next page of message history......"); if (!rawList) { return ''; } var currentUrlArr = currentUrl.split("&"); var nextHistoryPageArr = []; for(var item in currentUrlArr){ //console.log(currentUrlArr[item]); if(currentUrlArr[item].substring(0,5)=="/mp/p"||currentUrlArr[item].substring(0,5)=="__biz"|| currentUrlArr[item].substring(0,5)=="scene"|| currentUrlArr[item].substring(0,5)=="pass_"){ nextHistoryPageArr.push(currentUrlArr[item]); } } nextHistoryPageUrl = nextHistoryPageArr.join('&'); nextHistoryPageUrl += "&f=json"; // the offset is rewritten automatically in the main handler; no need to change it here nextHistoryPageUrl += "&offset=10"; nextHistoryPageUrl += "&count=10&is_ok=1"; nextHistoryPageUrl += "&uin=777&key=777"; nextHistoryPageUrl += "&wxtoken="; nextHistoryPageUrl += "&appmsg_token="; nextHistoryPageUrl += appmsg_token; nextHistoryPageUrl = "https://mp.weixin.qq.com" + nextHistoryPageUrl; nextHistoryPageUrl = nextHistoryPageUrl.replace("home", "getmsg"); console.log("this is the raw next-page url:"); console.log(nextHistoryPageUrl); firstLink = nextHistoryPageUrl; return nextHistoryPageUrl; }, replaceResponseHeader: function(req,res,header){ header = header || {}; console.log("start: rewriting response header from json to html"); if(flag && /mp\/profile_ext\?action=getmsg/i.test(req.url)) { console.log("rewriting content-type for the getmsg response"); header['content-type'] = "text/html; charset=UTF-8"; } console.log("done: response header rewritten from json to html"); return header; }, // entry point replaceServerResDataAsync: function (req, res, serverResData, callback) { console.log("captured a packet..."); // console.log(articleLinkArr.size); if(/mp\/profile_ext\?action=home/i.test(req.url)){ try{ var historyHomePage = /var msgList = \'(.*?)\';/; var historyHomePageList = historyHomePage.exec(serverResData.toString()); if(!historyHomePageList){ callback(serverResData); console.log("captured an empty packet!!"); return; } historyHomePageList[1] = historyHomePageList[1].replace(/&quot;/g, "'"); var historyHomePageObj = eval("("+historyHomePageList[1]+")"); // Problem: the capture shows 8 article summary entries, but only 4 made it into historyHomePageList -- is the regex match at fault?? // Solution: one entry in the middle of the list had historyHomePageObj['list'][item]["app_msg_ext_info"] undefined, and the resulting exception blocked the // remaining summary entries from being collected! 
for(var item in historyHomePageObj['list']){ console.log(item); if(historyHomePageObj['list'][item]["app_msg_ext_info"]==undefined){ continue; } console.log(historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]); var title = historyHomePageObj.list[item].app_msg_ext_info.title; var author = historyHomePageObj.list[item].app_msg_ext_info.author; var content_url = historyHomePageObj['list'][item]["app_msg_ext_info"]["content_url"]; var datetime = historyHomePageObj.list[item].comm_msg_info.datetime; var id = historyHomePageObj.list[item].comm_msg_info.id; console.log(title); //公众号名称 var nickname_pattern = /var nickname = \"(.*?)\"/; var nickname = nickname_pattern.exec(serverResData.toString())[1]; console.log("公众号的名字是————————", nickname); //当前历史页的文章各种信息 var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; // articleLinkArr.push(nickname); articleLinkArr.push(articleJson); } console.log(result); /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("连接MongoDB成功!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ var appmsg_token_pattern = /window.appmsg_token = \"(.*?)\";/; var appmsg_token = appmsg_token_pattern.exec(serverResData.toString())[1]; var nextHistoryPageUrl = this.getNextUrl(req.url, historyHomePageList, appmsg_token); firstContent = serverResData; //注入跳转下一历史页面的js var next = this.getNextChunk(nextHistoryPageUrl, 6000); var note = this.getNotification(); serverResData = note + serverResData + next; nowOffset = 0; console.log("成功获取到第一页历史消息页面666666666666666666666__end"); callback(serverResData); } catch (e){ callback(serverResData); } } else if(/mp\/profile_ext\?action=getmsg/i.test(req.url)){ try { if(!serverResData){ console.log("抓取公众号全部历史文章结束!"); return; } nowOffset += 10; firstLink = firstLink.replace("&offset="+nowOffset.toString(), "&offset="+(nowOffset+10).toString()); //注入跳转再下一页的js var note = this.getNotification(); var next = this.getNextChunk(firstLink, 6000); var newContent = note + firstContent + next; var newData = serverResData; var ResDataobj = JSON.parse(newData.toString()); var general_msg_list = ResDataobj['general_msg_list']; var listJson = JSON.parse(general_msg_list); for(var artileIndex in listJson.list){ try { var title = listJson.list[artileIndex].app_msg_ext_info.title; var author = listJson.list[artileIndex].app_msg_ext_info.author; var content_url = listJson.list[artileIndex].app_msg_ext_info.content_url; var datetime = listJson.list[artileIndex].comm_msg_info.datetime; var id = listJson.list[artileIndex].comm_msg_info.id; console.log(title); console.log(content_url); console.log(id); console.log(datetime); //当前历史页的文章各种信息 var getdatetime = getNowFormatDate(); var articleJson = { "title": title, "author": author, "content_url": content_url, "datetime": datetime, "id": id, "getdatetime": getdatetime }; console.log("__________", articleLinkArr); articleLinkArr.push(articleJson); console.log("__________", articleLinkArr); } catch (e){ console.log(listJson.list[artileIndex]); console.log("获取某个属性时出错! 
probably a short message rather than a history article", 'red'); } } /* MongoClient.connect(DB_CONN_STR, function(err, db) { console.log("connected to MongoDB!"); insertData(db, function(result) { console.log(result); db.close(); articleLinkArr = []; }); }); */ // console.log(result); // 'result' is only defined inside the commented-out MongoDB callback above console.log("successfully captured the next history message page (raw json)"); callback(newContent); } catch (e){ console.log("failed to parse the getmsg response"); callback(serverResData); } } else{ callback(serverResData); } } };
var date =
identifier_name
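The getNowFormatDate helper in the record above builds a zero-padded "YYYY-MM-DD H:M:S" timestamp by hand. For comparison, a minimal Go sketch of the same formatting (the function name is illustrative, and note one divergence: Go's reference-time layout also zero-pads hours, minutes, and seconds, which the JS version leaves unpadded):

```go
package main

import (
	"fmt"
	"time"
)

// nowFormatted is a hypothetical Go counterpart to the crawler's
// getNowFormatDate helper. Go's reference-time layout does the zero
// padding that the JS version performs by hand on month and day.
func nowFormatted() string {
	return time.Now().Format("2006-01-02 15:04:05")
}

func main() {
	fmt.Println(nowFormatted()) // e.g. 2019-07-01 09:05:03
}
```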
main.go
package main import ( "fmt" // "html" "log" "net/http" "os" "strings" "cmd/chompapi/login" "cmd/chompapi/register" "cmd/chompapi/globalsessionkeeper" "github.com/achatur/beego/session" "cmd/chompapi/me" "cmd/chompapi/auth" "cmd/chompapi/review" "database/sql" "github.com/gorilla/mux" "cmd/chompapi/crypto" "encoding/base64" "io/ioutil" "encoding/json" "cmd/chompapi/db" "reflect" "errors" ) type handler func(w http.ResponseWriter, r *http.Request) var MyDb *sql.DB // var context *globalsessionkeeper.AppContext func init() { var err error GetConfig() sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig) dbConfig := globalsessionkeeper.ChompConfig.DbConfig fmt.Printf("\n\n\nIn init, new manager\n") fmt.Printf("In init, new manager\n") fmt.Printf("In init, new manager\n\n\n\n") globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig)) if err != nil { fmt.Printf("Could not start session. Error: %v\n", err.Error()) os.Exit(-1) } err = errors.New("") fmt.Printf("Opening DB connection\n") // Connection string looks as the following //MyDb, err = sql.Open("service", "user@tcp(ip:port)/database") connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db) fmt.Printf("ConnString = %s\n", connString) MyDb, err = sql.Open("mysql", connString) if err != nil { // return err fmt.Printf("Error = %v\n", err) panic(fmt.Sprintf("%v", err)) } globalsessionkeeper.GlobalSessions.SetSecure(true) go globalsessionkeeper.GlobalSessions.GC() } func BasicAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { fmt.Println("made it to basic auth") fmt.Printf("Headers = %v\n", r.Header) fmt.Printf("Len = %v\n", len(r.Header)) if len(r.Header["Authorization"]) <= 0 { http.Error(w, "bad syntax", http.StatusBadRequest) return } auth := strings.SplitN(r.Header["Authorization"][0], " ", 2) fmt.Printf("auth = %v", auth) if len(auth) != 2 { http.Error(w, "bad syntax", http.StatusBadRequest) return } else if auth[0] != "Basic" { http.Error(w, "bad syntax", http.StatusBadRequest) return } payload, _ := base64.StdEncoding.DecodeString(auth[1]) pair := strings.SplitN(string(payload), ":", 2) if len(pair) != 2 || !Validate(pair[0], pair[1]) { http.Error(w, "authorization failed", http.StatusUnauthorized) return } pass(w, r) } } func (ah AppHandler) SessionAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { cookie := globalsessionkeeper.GetCookie(r) if cookie == "" { //need logging here instead of print fmt.Printf("Session Auth Cookie = %v\n", cookie) query := mux.Vars(r) fmt.Printf("Query here.. 
%v\n", query) if query["token"] != "" { fmt.Printf("Error not nil, updating error instacode %v\n", query["token"]) cookie = query["token"] } else { HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"}) return } } sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie) if err != nil { //need logging here instead of print HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } defer sessionStore.SessionRelease(w) ah.appContext.SessionStore = sessionStore sessionUser := sessionStore.Get("username") fmt.Printf("Session Auth SessionUser = %v\n", sessionUser) if sessionUser == nil { //need logging here instead of print fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser) userInfo := new(db.UserInfo) userInfo.Username = reflect.ValueOf(sessionUser).String() err = userInfo.GetUserInfo(MyDb) if err != nil { //need logging here instead of print fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } pass(w, r) } } func GetConfig() error { configFile, err := ioutil.ReadFile("./chomp_private/config.json") if err != nil { return err } err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig) if err != nil { fmt.Printf("Err = %v", err) return err } return nil } func Validate(username, password string) bool { fmt.Println("Made it to validate..") for _, e := range globalsessionkeeper.ChompConfig.Authorized { if e.User == username && e.Pass == password { return true } } return false } type AppHandler struct { appContext *globalsessionkeeper.AppContext h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error) } func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse)
func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) { fmt.Printf("AH Context = %v\n", ah.appContext) err := ah.h(ah.appContext, w, r) if err != nil { // log.Printf("HTTP %d: %q", status, err) status := err.(globalsessionkeeper.ErrorResponse).Code switch status { case http.StatusNotFound: fmt.Printf("Error: Page not found\n") HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) case http.StatusInternalServerError: fmt.Printf("Error: %v\n", http.StatusInternalServerError) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) default: fmt.Printf("Error: %v\n", err) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) } } } func main() { defer MyDb.Close() router := mux.NewRouter().StrictSlash(true) context := &globalsessionkeeper.AppContext{DB: MyDb} fmt.Printf("Context = %v\n", context) router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp) router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp) router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp) router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp)) router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp)) router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp)) router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp)) //this is how you write a query parameter capture uri router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp)) router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp)) router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp)) router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp)) router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp)) router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h: me.DeleteMe}.SessionAuth(AppHandler{appContext: context, h: me.DeleteMe}.ServerHttp)) router.HandleFunc("/me/update/instaClick", AppHandler{appContext: context, h: me.InstagramLinkClick}.SessionAuth(AppHandler{appContext: context, h: me.InstagramLinkClick}.ServerHttp)) 
router.HandleFunc("/me/update/da/{userID}", AppHandler{appContext: context, h: me.DeactivateMe}.SessionAuth(AppHandler{appContext: context, h: me.DeactivateMe}.ServerHttp)) router.HandleFunc("/me/update/astu", AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.SessionAuth(AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.ServerHttp)) router.HandleFunc("/reviews", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/reviews/{reviewID}", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/insta/crawl", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.Crawl}.ServerHttp)) router.HandleFunc("/insta/import", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.AppCrawl}.ServerHttp)) port := "8000" if os.Getenv("PORT") != "" { port = os.Getenv("PORT") } if strings.Contains(string(port), "443") { log.Fatal(http.ListenAndServeTLS(":"+port, globalsessionkeeper.ChompConfig.Cert.Cert, globalsessionkeeper.ChompConfig.Cert.Key, router)) } else { log.Fatal(http.ListenAndServe(":" + port, router)) } }
{ fmt.Printf("Going out as: %v\n", errorResponse) w.Header().Set("Content-Type", "application/json") w.WriteHeader(errorResponse.Code) json.NewEncoder(w).Encode(errorResponse) }
identifier_body
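The middle of this record is the body of HttpErrorResponder: set the JSON content type, write the status code, then encode the error as the body. Below is a small self-contained sketch of that pattern, exercised with httptest; the local errorResponse type is an assumed stand-in for globalsessionkeeper.ErrorResponse, whose exact fields are not shown here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// errorResponse is a local stand-in for globalsessionkeeper.ErrorResponse;
// the real struct's fields are not shown in this file, so two are assumed.
type errorResponse struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

// respondError mirrors the HttpErrorResponder body filled in above: set the
// JSON content type, write the status code, then encode the struct as the body.
func respondError(w http.ResponseWriter, e errorResponse) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(e.Code)
	json.NewEncoder(w).Encode(e)
}

func main() {
	rec := httptest.NewRecorder()
	respondError(rec, errorResponse{http.StatusUnauthorized, "Session Expired"})
	fmt.Println(rec.Code, rec.Body.String()) // 401 {"code":401,"message":"Session Expired"}
}
```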
main.go
package main import ( "fmt" // "html" "log" "net/http" "os" "strings" "cmd/chompapi/login" "cmd/chompapi/register" "cmd/chompapi/globalsessionkeeper" "github.com/achatur/beego/session" "cmd/chompapi/me" "cmd/chompapi/auth" "cmd/chompapi/review" "database/sql" "github.com/gorilla/mux" "cmd/chompapi/crypto" "encoding/base64" "io/ioutil" "encoding/json" "cmd/chompapi/db" "reflect" "errors" ) type handler func(w http.ResponseWriter, r *http.Request) var MyDb *sql.DB // var context *globalsessionkeeper.AppContext func init() { var err error GetConfig() sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig) dbConfig := globalsessionkeeper.ChompConfig.DbConfig fmt.Printf("\n\n\nIn init, new manager\n") fmt.Printf("In init, new manager\n") fmt.Printf("In init, new manager\n\n\n\n") globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig)) if err != nil { fmt.Printf("Could not start session. Error: %v\n", err.Error()) os.Exit(-1) } err = errors.New("") fmt.Printf("Opening DB connection\n") // Connection string looks as the following //MyDb, err = sql.Open("service", "user@tcp(ip:port)/database") connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db) fmt.Printf("ConnString = %s\n", connString) MyDb, err = sql.Open("mysql", connString) if err != nil { // return err fmt.Printf("Error = %v\n", err) panic(fmt.Sprintf("%v", err)) } globalsessionkeeper.GlobalSessions.SetSecure(true) go globalsessionkeeper.GlobalSessions.GC() } func BasicAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { fmt.Println("made it to basic auth") fmt.Printf("Headers = %v\n", r.Header) fmt.Printf("Len = %v\n", len(r.Header)) if len(r.Header["Authorization"]) <= 0 { http.Error(w, "bad syntax", http.StatusBadRequest) return } auth := strings.SplitN(r.Header["Authorization"][0], " ", 2) fmt.Printf("auth = %v", auth) if len(auth) != 2 { http.Error(w, "bad syntax", http.StatusBadRequest) return } else if auth[0] != "Basic" { http.Error(w, "bad syntax", http.StatusBadRequest) return } payload, _ := base64.StdEncoding.DecodeString(auth[1]) pair := strings.SplitN(string(payload), ":", 2) if len(pair) != 2 || !Validate(pair[0], pair[1]) { http.Error(w, "authorization failed", http.StatusUnauthorized) return } pass(w, r) } } func (ah AppHandler) SessionAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { cookie := globalsessionkeeper.GetCookie(r) if cookie == "" { //need logging here instead of print fmt.Printf("Session Auth Cookie = %v\n", cookie) query := mux.Vars(r) fmt.Printf("Query here.. 
%v\n", query) if query["token"] != "" { fmt.Printf("Error not nil, updating error instacode %v\n", query["token"]) cookie = query["token"] } else { HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"}) return } } sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie) if err != nil { //need logging here instead of print HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } defer sessionStore.SessionRelease(w) ah.appContext.SessionStore = sessionStore sessionUser := sessionStore.Get("username") fmt.Printf("Session Auth SessionUser = %v\n", sessionUser) if sessionUser == nil { //need logging here instead of print fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser) userInfo := new(db.UserInfo) userInfo.Username = reflect.ValueOf(sessionUser).String() err = userInfo.GetUserInfo(MyDb) if err != nil { //need logging here instead of print fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } pass(w, r) } } func GetConfig() error { configFile, err := ioutil.ReadFile("./chomp_private/config.json") if err != nil { return err } err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig) if err != nil { fmt.Printf("Err = %v", err) return err } return nil } func Validate(username, password string) bool { fmt.Println("Made it to validate..") for _, e := range globalsessionkeeper.ChompConfig.Authorized { if e.User == username && e.Pass == password { return true } } return false } type AppHandler struct { appContext *globalsessionkeeper.AppContext h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error) } func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) { fmt.Printf("Going out as: %v\n", errorResponse) w.Header().Set("Content-Type", "application/json") w.WriteHeader(errorResponse.Code) json.NewEncoder(w).Encode(errorResponse) } func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) {
status := err.(globalsessionkeeper.ErrorResponse).Code switch status { case http.StatusNotFound: fmt.Printf("Error: Page not found\n") HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) case http.StatusInternalServerError: fmt.Printf("Error: %v\n", http.StatusInternalServerError) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) default: fmt.Printf("Error: %v\n", err) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) } } } func main() { defer MyDb.Close() router := mux.NewRouter().StrictSlash(true) context := &globalsessionkeeper.AppContext{DB: MyDb} fmt.Printf("Context = %v\n", context) router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp) router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp) router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp) router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp)) router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp)) router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp)) router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp)) //this is how you write a query parameter capture uri router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp)) router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp)) router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp)) router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp)) router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp)) router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h: me.DeleteMe}.SessionAuth(AppHandler{appContext: context, h: me.DeleteMe}.ServerHttp)) router.HandleFunc("/me/update/instaClick", AppHandler{appContext: context, h: me.InstagramLinkClick}.SessionAuth(AppHandler{appContext: context, h: me.InstagramLinkClick}.ServerHttp)) router.HandleFunc("/me/update/da/{userID}", AppHandler{appContext: context, h: me.DeactivateMe}.SessionAuth(AppHandler{appContext: context, h: me.DeactivateMe}.ServerHttp)) router.HandleFunc("/me/update/astu", 
AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.SessionAuth(AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.ServerHttp)) router.HandleFunc("/reviews", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/reviews/{reviewID}", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/insta/crawl", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.Crawl}.ServerHttp)) router.HandleFunc("/insta/import", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.AppCrawl}.ServerHttp)) port := "8000" if os.Getenv("PORT") != "" { port = os.Getenv("PORT") } if strings.Contains(string(port), "443") { log.Fatal(http.ListenAndServeTLS(":"+port, globalsessionkeeper.ChompConfig.Cert.Cert, globalsessionkeeper.ChompConfig.Cert.Key, router)) } else { log.Fatal(http.ListenAndServe(":" + port, router)) } }
fmt.Printf("AH Context = %v\n", ah.appContext) err := ah.h(ah.appContext, w, r) if err != nil { // log.Printf("HTTP %d: %q", status, err)
random_line_split
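ServerHttp is a thin adapter: the wrapped handler returns an error and a single switch maps it to an HTTP response. A sketch of the same pattern under stated assumptions (statusError stands in for globalsessionkeeper.ErrorResponse); it uses a comma-ok assertion, a safer variant than ServerHttp's unchecked one, which would panic on a plain error:

```go
package main

import (
	"fmt"
	"net/http"
)

// statusError is a minimal stand-in for globalsessionkeeper.ErrorResponse.
type statusError struct {
	Code int
	Msg  string
}

func (e statusError) Error() string { return e.Msg }

// appHandler demonstrates the pattern ServerHttp wraps: the inner handler
// returns an error and the adapter maps it to an HTTP response in one place.
type appHandler func(http.ResponseWriter, *http.Request) error

func (h appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if err := h(w, r); err != nil {
		// The comma-ok form avoids a panic if a plain error slips through.
		if se, ok := err.(statusError); ok {
			http.Error(w, se.Msg, se.Code)
			return
		}
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func main() {
	http.Handle("/boom", appHandler(func(w http.ResponseWriter, r *http.Request) error {
		return statusError{Code: http.StatusNotFound, Msg: "Page not found"}
	}))
	fmt.Println("listening on :8080")
	http.ListenAndServe(":8080", nil)
}
```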
main.go
package main import ( "fmt" // "html" "log" "net/http" "os" "strings" "cmd/chompapi/login" "cmd/chompapi/register" "cmd/chompapi/globalsessionkeeper" "github.com/achatur/beego/session" "cmd/chompapi/me" "cmd/chompapi/auth" "cmd/chompapi/review" "database/sql" "github.com/gorilla/mux" "cmd/chompapi/crypto" "encoding/base64" "io/ioutil" "encoding/json" "cmd/chompapi/db" "reflect" "errors" ) type handler func(w http.ResponseWriter, r *http.Request) var MyDb *sql.DB // var context *globalsessionkeeper.AppContext func init() { var err error GetConfig() sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig) dbConfig := globalsessionkeeper.ChompConfig.DbConfig fmt.Printf("\n\n\nIn init, new manager\n") fmt.Printf("In init, new manager\n") fmt.Printf("In init, new manager\n\n\n\n") globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig)) if err != nil { fmt.Printf("Could not start session. Error: %v\n", err.Error()) os.Exit(-1) } err = errors.New("") fmt.Printf("Opening DB connection\n") // Connection string looks as the following //MyDb, err = sql.Open("service", "user@tcp(ip:port)/database") connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db) fmt.Printf("ConnString = %s\n", connString) MyDb, err = sql.Open("mysql", connString) if err != nil { // return err fmt.Printf("Error = %v\n", err) panic(fmt.Sprintf("%v", err)) } globalsessionkeeper.GlobalSessions.SetSecure(true) go globalsessionkeeper.GlobalSessions.GC() } func BasicAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { fmt.Println("made it to basic auth") fmt.Printf("Headers = %v\n", r.Header) fmt.Printf("Len = %v\n", len(r.Header)) if len(r.Header["Authorization"]) <= 0 { http.Error(w, "bad syntax", http.StatusBadRequest) return } auth := strings.SplitN(r.Header["Authorization"][0], " ", 2) fmt.Printf("auth = %v", auth) if len(auth) != 2
else if auth[0] != "Basic" { http.Error(w, "bad syntax", http.StatusBadRequest) return } payload, _ := base64.StdEncoding.DecodeString(auth[1]) pair := strings.SplitN(string(payload), ":", 2) if len(pair) != 2 || !Validate(pair[0], pair[1]) { http.Error(w, "authorization failed", http.StatusUnauthorized) return } pass(w, r) } } func (ah AppHandler) SessionAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { cookie := globalsessionkeeper.GetCookie(r) if cookie == "" { //need logging here instead of print fmt.Printf("Session Auth Cookie = %v\n", cookie) query := mux.Vars(r) fmt.Printf("Query here.. %v\n", query) if query["token"] != "" { fmt.Printf("Error not nil, updating error instacode %v\n", query["token"]) cookie = query["token"] } else { HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"}) return } } sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie) if err != nil { //need logging here instead of print HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } defer sessionStore.SessionRelease(w) ah.appContext.SessionStore = sessionStore sessionUser := sessionStore.Get("username") fmt.Printf("Session Auth SessionUser = %v\n", sessionUser) if sessionUser == nil { //need logging here instead of print fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser) userInfo := new(db.UserInfo) userInfo.Username = reflect.ValueOf(sessionUser).String() err = userInfo.GetUserInfo(MyDb) if err != nil { //need logging here instead of print fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } pass(w, r) } } func GetConfig() error { configFile, err := ioutil.ReadFile("./chomp_private/config.json") if err != nil { return err } err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig) if err != nil { fmt.Printf("Err = %v", err) return err } return nil } func Validate(username, password string) bool { fmt.Println("Made it to validate..") for _, e := range globalsessionkeeper.ChompConfig.Authorized { if e.User == username && e.Pass == password { return true } } return false } type AppHandler struct { appContext *globalsessionkeeper.AppContext h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error) } func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) { fmt.Printf("Going out as: %v\n", errorResponse) w.Header().Set("Content-Type", "application/json") w.WriteHeader(errorResponse.Code) json.NewEncoder(w).Encode(errorResponse) } func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) { fmt.Printf("AH Context = %v\n", ah.appContext) err := ah.h(ah.appContext, w, r) if err != nil { // log.Printf("HTTP %d: %q", status, err) status := err.(globalsessionkeeper.ErrorResponse).Code switch status { case http.StatusNotFound: fmt.Printf("Error: Page not found\n") HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) case http.StatusInternalServerError: fmt.Printf("Error: %v\n", http.StatusInternalServerError) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) default: fmt.Printf("Error: %v\n", err)
HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) } } } func main() { defer MyDb.Close() router := mux.NewRouter().StrictSlash(true) context := &globalsessionkeeper.AppContext{DB: MyDb} fmt.Printf("Context = %v\n", context) router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp) router.HandleFunc("/verify", AppHandler{context, auth.VerifyHandler}.ServerHttp) router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp) router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp)) router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp)) router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp)) router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp)) //this is how you write a query parameter capture uri router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp)) router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp)) router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp)) router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp)) router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp)) router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h: me.DeleteMe}.SessionAuth(AppHandler{appContext: context, h: me.DeleteMe}.ServerHttp)) router.HandleFunc("/me/update/instaClick", AppHandler{appContext: context, h: me.InstagramLinkClick}.SessionAuth(AppHandler{appContext: context, h: me.InstagramLinkClick}.ServerHttp)) router.HandleFunc("/me/update/da/{userID}", AppHandler{appContext: context, h: me.DeactivateMe}.SessionAuth(AppHandler{appContext: context, h: me.DeactivateMe}.ServerHttp)) router.HandleFunc("/me/update/astu", AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.SessionAuth(AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.ServerHttp)) router.HandleFunc("/reviews", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/reviews/{reviewID}", AppHandler{appContext: context, h: 
review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/insta/crawl", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.Crawl}.ServerHttp)) router.HandleFunc("/insta/import", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.AppCrawl}.ServerHttp)) port := "8000" if os.Getenv("PORT") != "" { port = os.Getenv("PORT") } if strings.Contains(string(port), "443") { log.Fatal(http.ListenAndServeTLS(":"+port, globalsessionkeeper.ChompConfig.Cert.Cert, globalsessionkeeper.ChompConfig.Cert.Key, router)) } else { log.Fatal(http.ListenAndServe(":" + port, router)) } }
{ http.Error(w, "bad syntax", http.StatusBadRequest) return }
conditional_block
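The conditional block filled in here is one of BasicAuth's hand-rolled header checks. The standard library's (*http.Request).BasicAuth performs the same split-and-decode; below is a minimal alternative sketch, not the project's code:

```go
package main

import (
	"fmt"
	"net/http"
)

// basicAuth sketches the same check BasicAuth performs by hand: the standard
// library's (*http.Request).BasicAuth already splits the Authorization header
// and base64-decodes the user:pass pair.
func basicAuth(validate func(user, pass string) bool, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		user, pass, ok := r.BasicAuth()
		if !ok {
			http.Error(w, "bad syntax", http.StatusBadRequest)
			return
		}
		if !validate(user, pass) {
			http.Error(w, "authorization failed", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}

func main() {
	// Round-trip demo: SetBasicAuth writes the header, BasicAuth parses it back.
	req, _ := http.NewRequest("GET", "/admin/jwt", nil)
	req.SetBasicAuth("admin", "secret")
	user, pass, ok := req.BasicAuth()
	fmt.Println(user, pass, ok) // admin secret true
}
```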
main.go
package main import ( "fmt" // "html" "log" "net/http" "os" "strings" "cmd/chompapi/login" "cmd/chompapi/register" "cmd/chompapi/globalsessionkeeper" "github.com/achatur/beego/session" "cmd/chompapi/me" "cmd/chompapi/auth" "cmd/chompapi/review" "database/sql" "github.com/gorilla/mux" "cmd/chompapi/crypto" "encoding/base64" "io/ioutil" "encoding/json" "cmd/chompapi/db" "reflect" "errors" ) type handler func(w http.ResponseWriter, r *http.Request) var MyDb *sql.DB // var context *globalsessionkeeper.AppContext func init() { var err error GetConfig() sessionConfig, _ := json.Marshal(globalsessionkeeper.ChompConfig.ManagerConfig) dbConfig := globalsessionkeeper.ChompConfig.DbConfig fmt.Printf("\n\n\nIn init, new manager\n") fmt.Printf("In init, new manager\n") fmt.Printf("In init, new manager\n\n\n\n") globalsessionkeeper.GlobalSessions, err = session.NewManager("mysql", string(sessionConfig)) if err != nil { fmt.Printf("Could not start session. Error: %v\n", err.Error()) os.Exit(-1) } err = errors.New("") fmt.Printf("Opening DB connection\n") // Connection string looks as the following //MyDb, err = sql.Open("service", "user@tcp(ip:port)/database") connString := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s", dbConfig.User, dbConfig.Pass, dbConfig.Host,dbConfig.Port, dbConfig.Db) fmt.Printf("ConnString = %s\n", connString) MyDb, err = sql.Open("mysql", connString) if err != nil { // return err fmt.Printf("Error = %v\n", err) panic(fmt.Sprintf("%v", err)) } globalsessionkeeper.GlobalSessions.SetSecure(true) go globalsessionkeeper.GlobalSessions.GC() } func BasicAuth(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { fmt.Println("made it to basic auth") fmt.Printf("Headers = %v\n", r.Header) fmt.Printf("Len = %v\n", len(r.Header)) if len(r.Header["Authorization"]) <= 0 { http.Error(w, "bad syntax", http.StatusBadRequest) return } auth := strings.SplitN(r.Header["Authorization"][0], " ", 2) fmt.Printf("auth = %v", auth) if len(auth) != 2 { http.Error(w, "bad syntax", http.StatusBadRequest) return } else if auth[0] != "Basic" { http.Error(w, "bad syntax", http.StatusBadRequest) return } payload, _ := base64.StdEncoding.DecodeString(auth[1]) pair := strings.SplitN(string(payload), ":", 2) if len(pair) != 2 || !Validate(pair[0], pair[1]) { http.Error(w, "authorization failed", http.StatusUnauthorized) return } pass(w, r) } } func (ah AppHandler)
(pass handler) handler { return func(w http.ResponseWriter, r *http.Request) { cookie := globalsessionkeeper.GetCookie(r) if cookie == "" { //need logging here instead of print fmt.Printf("Session Auth Cookie = %v\n", cookie) query := mux.Vars(r) fmt.Printf("Query here.. %v\n", query) if query["token"] != "" { fmt.Printf("Error not nil, updating error instacode %v\n", query["token"]) cookie = query["token"] } else { HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "No Cookie Present"}) return } } sessionStore, err := globalsessionkeeper.GlobalSessions.GetSessionStore(cookie) if err != nil { //need logging here instead of print HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } defer sessionStore.SessionRelease(w) ah.appContext.SessionStore = sessionStore sessionUser := sessionStore.Get("username") fmt.Printf("Session Auth SessionUser = %v\n", sessionUser) if sessionUser == nil { //need logging here instead of print fmt.Printf("Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } fmt.Printf("Session Auth Getting user info for user %v\n", sessionUser) userInfo := new(db.UserInfo) userInfo.Username = reflect.ValueOf(sessionUser).String() err = userInfo.GetUserInfo(MyDb) if err != nil { //need logging here instead of print fmt.Printf("Session Auth Username not found, returning unauth, Get has %v\n", sessionStore) HttpErrorResponder(w, globalsessionkeeper.ErrorResponse{http.StatusUnauthorized, "Session Expired"}) return } pass(w, r) } } func GetConfig() error { configFile, err := ioutil.ReadFile("./chomp_private/config.json") if err != nil { return err } err = json.Unmarshal(configFile, &globalsessionkeeper.ChompConfig) if err != nil { fmt.Printf("Err = %v", err) return err } return nil } func Validate(username, password string) bool { fmt.Println("Made it to validate..") for _, e := range globalsessionkeeper.ChompConfig.Authorized { if e.User == username && e.Pass == password { return true } } return false } type AppHandler struct { appContext *globalsessionkeeper.AppContext h func(*globalsessionkeeper.AppContext, http.ResponseWriter, *http.Request) (error) } func HttpErrorResponder(w http.ResponseWriter, errorResponse globalsessionkeeper.ErrorResponse) { fmt.Printf("Going out as: %v\n", errorResponse) w.Header().Set("Content-Type", "application/json") w.WriteHeader(errorResponse.Code) json.NewEncoder(w).Encode(errorResponse) } func (ah AppHandler) ServerHttp(w http.ResponseWriter, r *http.Request) { fmt.Printf("AH Context = %v\n", ah.appContext) err := ah.h(ah.appContext, w, r) if err != nil { // log.Printf("HTTP %d: %q", status, err) status := err.(globalsessionkeeper.ErrorResponse).Code switch status { case http.StatusNotFound: fmt.Printf("Error: Page not found\n") HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) case http.StatusInternalServerError: fmt.Printf("Error: %v\n", http.StatusInternalServerError) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) default: fmt.Printf("Error: %v\n", err) HttpErrorResponder(w, err.(globalsessionkeeper.ErrorResponse)) } } } func main() { defer MyDb.Close() router := mux.NewRouter().StrictSlash(true) context := &globalsessionkeeper.AppContext{DB: MyDb} fmt.Printf("Context = %v\n", context) router.HandleFunc("/login", AppHandler{context, login.DoLogin}.ServerHttp) router.HandleFunc("/verify", AppHandler{context, 
auth.VerifyHandler}.ServerHttp) router.HandleFunc("/register", AppHandler{context, register.DoRegister}.ServerHttp) router.HandleFunc("/admin/fp", BasicAuth(AppHandler{context, register.ForgotPassword}.ServerHttp)) router.HandleFunc("/admin/fu", BasicAuth(AppHandler{context, register.ForgotUsername}.ServerHttp)) router.HandleFunc("/admin/jwt", BasicAuth(AppHandler{context, crypto.GetJwt}.ServerHttp)) router.HandleFunc("/me", AppHandler{appContext: context, h: me.GetMe}.SessionAuth(AppHandler{appContext: context, h: me.GetMe}.ServerHttp)) //this is how you write a query parameter capture uri router.Queries("token", "{token}", "code", "{code:.*}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("token", "{token}", "error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.Queries("error", "{error}").HandlerFunc(AppHandler{appContext: context, h: me.Instagram}.SessionAuth(AppHandler{context, me.Instagram}.ServerHttp)) router.HandleFunc("/me/logout", AppHandler{appContext: context, h: me.Logout}.SessionAuth(AppHandler{appContext: context, h: me.Logout}.ServerHttp)) router.HandleFunc("/me/logout/all", AppHandler{appContext: context, h: me.LogoutAll}.SessionAuth(AppHandler{appContext: context, h: me.LogoutAll}.ServerHttp)) router.HandleFunc("/me/photos", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/photos/{photoID}", AppHandler{appContext: context, h: me.PostPhotoId}.SessionAuth(AppHandler{appContext: context, h: me.PostPhotoId}.ServerHttp)) router.HandleFunc("/me/reviews", AppHandler{appContext: context, h: me.Reviews}.SessionAuth(AppHandler{appContext: context, h: me.Reviews}.ServerHttp)) router.HandleFunc("/me/update/up", AppHandler{appContext: context, h: me.UpdatePassword}.SessionAuth(AppHandler{appContext: context, h: me.UpdatePassword}.ServerHttp)) router.HandleFunc("/me/update/email", AppHandler{appContext: context, h: me.UpdateEmail}.SessionAuth(AppHandler{appContext: context, h: me.UpdateEmail}.ServerHttp)) router.HandleFunc("/me/update/d/{userID}", AppHandler{appContext: context, h: me.DeleteMe}.SessionAuth(AppHandler{appContext: context, h: me.DeleteMe}.ServerHttp)) router.HandleFunc("/me/update/instaClick", AppHandler{appContext: context, h: me.InstagramLinkClick}.SessionAuth(AppHandler{appContext: context, h: me.InstagramLinkClick}.ServerHttp)) router.HandleFunc("/me/update/da/{userID}", AppHandler{appContext: context, h: me.DeactivateMe}.SessionAuth(AppHandler{appContext: context, h: me.DeactivateMe}.ServerHttp)) router.HandleFunc("/me/update/astu", AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.SessionAuth(AppHandler{appContext: context, h: me.UpdateAccountSetupTimestamp}.ServerHttp)) router.HandleFunc("/reviews", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/reviews/{reviewID}", AppHandler{appContext: context, h: review.Reviews}.SessionAuth(AppHandler{appContext: context, h: review.Reviews}.ServerHttp)) router.HandleFunc("/insta/crawl", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: review.Crawl}.ServerHttp)) router.HandleFunc("/insta/import", AppHandler{appContext: context, h: review.Crawl}.SessionAuth(AppHandler{appContext: context, h: 
review.AppCrawl}.ServerHttp)) port := "8000" if os.Getenv("PORT") != "" { port = os.Getenv("PORT") } if strings.Contains(string(port), "443") { log.Fatal(http.ListenAndServeTLS(":"+port, globalsessionkeeper.ChompConfig.Cert.Cert, globalsessionkeeper.ChompConfig.Cert.Key, router)) } else { log.Fatal(http.ListenAndServe(":" + port, router)) } }
SessionAuth
identifier_name
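SessionAuth, the identifier filled in here, checks for a session cookie first and falls back to a ?token= query parameter (the Instagram callback routes registered with router.Queries arrive without a cookie). A plain-stdlib sketch of that lookup order follows; the cookie name is an assumption, and the real code goes through globalsessionkeeper.GetCookie and mux.Vars instead:

```go
package main

import (
	"fmt"
	"net/http"
)

// sessionToken sketches the lookup order SessionAuth uses: prefer the session
// cookie, then fall back to a ?token= query parameter.
func sessionToken(r *http.Request) (string, bool) {
	if c, err := r.Cookie("session"); err == nil && c.Value != "" {
		return c.Value, true
	}
	if t := r.URL.Query().Get("token"); t != "" {
		return t, true
	}
	return "", false
}

func main() {
	req, _ := http.NewRequest("GET", "/me?token=abc123", nil)
	tok, ok := sessionToken(req)
	fmt.Println(tok, ok) // abc123 true
}
```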
predicates.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package predicates import ( "fmt" "sort" "strings" "sync" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/util/errors" computeapi "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/compute/models" "yunion.io/x/onecloud/pkg/scheduler/algorithm/plugin" "yunion.io/x/onecloud/pkg/scheduler/api" "yunion.io/x/onecloud/pkg/scheduler/core" ) // BasePredicate is a default struct for all the predicates that will // include it and implement its Name() and PreExecute() methods. type BasePredicate struct{} func (b *BasePredicate) Name() string { return "base_predicate_should_not_be_called" } func (b *BasePredicate) PreExecute(unit *core.Unit, candis []core.Candidater) (bool, error) { return true, nil } func (b *BasePredicate) GetHypervisorDriver(u *core.Unit) models.IGuestDriver { return models.GetDriver(u.GetHypervisor()) } type PredicateHelper struct { predicate core.FitPredicate predicateFails []core.PredicateFailureReason capacity int64 Unit *core.Unit Candidate core.Candidater } func (h *PredicateHelper) getResult() (bool, []core.PredicateFailureReason, error) { if len(h.predicateFails) > 0 { return false, h.predicateFails, nil } if h.capacity == 0 { return false, []core.PredicateFailureReason{}, nil } return true, nil, nil } func (h *PredicateHelper) GetResult() (bool, []core.PredicateFailureReason, error) { ok, reasons, err := h.getResult() if !ok { log.Warningf("[Filter Result] candidate: %q, filter: %q, is_ok: %v, reason: %q, error: %v\n", h.Candidate.IndexKey(), h.predicate.Name(), ok, getReasonsString(reasons), err) } return ok, reasons, err } func getReasonsString(reasons []core.PredicateFailureReason) string { if len(reasons) == 0 { return "" } ss := make([]string, 0, len(reasons)) for _, reason := range reasons { ss = append(ss, reason.GetReason()) } return strings.Join(ss, ", ") } func NewPredicateHelper(pre core.FitPredicate, unit *core.Unit, candi core.Candidater) *PredicateHelper { h := &PredicateHelper{ predicate: pre, capacity: core.EmptyCapacity, predicateFails: []core.PredicateFailureReason{}, Unit: unit, Candidate: candi, } return h } func (h *PredicateHelper) GetFailedResult(err error) (bool, []core.PredicateFailureReason, error) { return false, nil, err } func (h *PredicateHelper) AppendPredicateFail(reason core.PredicateFailureReason) { h.predicateFails = append(h.predicateFails, reason) } type predicateFailure struct { err 
core.PredicateFailureError eType string } func (f predicateFailure) GetReason() string { return f.err.GetReason() } func (f predicateFailure) GetType() string { return f.eType } func (h *PredicateHelper) AppendPredicateFailMsg(reason string) { h.AppendPredicateFailMsgWithType(reason, h.predicate.Name()) } func (h *PredicateHelper) AppendPredicateFailMsgWithType(reason string, eType string) { err := NewUnexceptedResourceError(reason) h.AppendPredicateFail(&predicateFailure{err: err, eType: eType}) } func (h *PredicateHelper) AppendInsufficientResourceError(req, total, free int64) { h.AppendPredicateFail( &predicateFailure{ err: NewInsufficientResourceError(h.Candidate.Getter().Name(), req, total, free), eType: h.predicate.Name(), }) } // SetCapacity sets the resource capacity calculated by a filter; the default capacity is -1 (core.EmptyCapacity). func (h *PredicateHelper) SetCapacity(capacity int64) { if capacity < 0 { capacity = 0 } h.SetCapacityCounter(core.NewNormalCounter(capacity)) } func (h *PredicateHelper) SetCapacityCounter(counter core.Counter) { capacity := counter.GetCount() if capacity < core.EmptyCapacity { capacity = core.EmptyCapacity } h.capacity = capacity h.Unit.SetCapacity(h.Candidate.IndexKey(), h.predicate.Name(), counter) } func (h *PredicateHelper) SetSelectPriority(sp int) { if sp < 0 { sp = 0 } h.Unit.SetSelectPriorityWithLock(h.Candidate.IndexKey(), h.predicate.Name(), core.SSelectPriorityValue(sp)) } func (h *PredicateHelper) Exclude(reason string) { h.SetCapacity(0) h.AppendPredicateFailMsg(reason) } func (h *PredicateHelper) ExcludeByErrors(errs []core.PredicateFailureReason) { h.SetCapacity(0) for _, err := range errs { h.AppendPredicateFail(err) } } func (h *PredicateHelper) Exclude2(predicateName string, current, expected interface{}) { h.Exclude(fmt.Sprintf("%s is '%v', expected '%v'", predicateName, current, expected)) } // UseReserved checks whether the unit can use guest reserved resources. func (h *PredicateHelper) UseReserved() bool { usable := false data := h.Unit.SchedData() isoDevs := data.IsolatedDevices if len(isoDevs) > 0 { usable = true } return usable } type PredicatedSchedtagResource struct { ISchedtagCandidateResource PreferTags []computeapi.SchedtagConfig AvoidTags []computeapi.SchedtagConfig } type SchedtagInputResourcesMap map[int][]*PredicatedSchedtagResource func (m SchedtagInputResourcesMap) getAllTags(isPrefer bool) []computeapi.SchedtagConfig { ret := make([]computeapi.SchedtagConfig, 0) for _, ss := range m { for _, s := range ss { var tags []computeapi.SchedtagConfig if isPrefer { tags = s.PreferTags } else { tags = s.AvoidTags } ret = append(ret, tags...) } } return ret } func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig
func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig { return m.getAllTags(false) } type CandidateInputResourcesMap struct { *sync.Map // map[string]SchedtagInputResourcesMap } type ISchedtagCandidateResource interface { GetName() string GetId() string Keyword() string GetSchedtags() []models.SSchedtag GetSchedtagJointManager() models.ISchedtagJointManager GetDynamicConditionInput() *jsonutils.JSONDict } type ISchedtagPredicateInstance interface { core.FitPredicate OnPriorityEnd(u *core.Unit, c core.Candidater) OnSelectEnd(u *core.Unit, c core.Candidater, count int64) GetInputs(u *core.Unit) []ISchedtagCustomer GetResources(c core.Candidater) []ISchedtagCandidateResource IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource) GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64 } type BaseSchedtagPredicate struct { BasePredicate plugin.BasePlugin CandidateInputResources *CandidateInputResourcesMap Hypervisor string } func NewBaseSchedtagPredicate() *BaseSchedtagPredicate { return &BaseSchedtagPredicate{ CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap), } } func (p *PredicatedSchedtagResource) isNoTag() bool { return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0 } func (p *PredicatedSchedtagResource) hasPreferTags() bool { return len(p.PreferTags) != 0 } func (p *PredicatedSchedtagResource) hasAvoidTags() bool { return len(p.AvoidTags) != 0 } type ISchedtagCustomer interface { JSON(interface{}) *jsonutils.JSONDict Keyword() string IsSpecifyResource() bool GetSchedtags() []*computeapi.SchedtagConfig ResourceKeyword() string } type SchedtagResourceW struct { candidater ISchedtagCandidateResource input ISchedtagCustomer } func (w SchedtagResourceW) IndexKey() string { return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId()) } func (w SchedtagResourceW) ResourceType() string { return getSchedtagResourceType(w.candidater) } func getSchedtagResourceType(candidater ISchedtagCandidateResource) string { return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural() } func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag { return w.candidater.GetSchedtags() } func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict { ret := jsonutils.NewDict() resSchedDesc := w.candidater.GetDynamicConditionInput() inputSchedDesc := w.input.JSON(w.input) ret.Add(resSchedDesc, w.candidater.Keyword()) ret.Add(inputSchedDesc, w.input.Keyword()) return ret } func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver { return models.GetDriver(p.Hypervisor) } func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) { allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate)) if err != nil { return nil, err } tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags) shouldExec := u.ShouldExecuteSchedtagFilter(c.Getter().Id()) res := &PredicatedSchedtagResource{ ISchedtagCandidateResource: candidate, } if 
shouldExec && !input.IsSpecifyResource() { if err := tagPredicate.Check( SchedtagResourceW{ candidater: candidate, input: input, }, ); err != nil { return nil, err } res.PreferTags = tagPredicate.GetPreferTags() res.AvoidTags = tagPredicate.GetAvoidTags() } return res, nil } func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) { errs := make([]error, 0) ret := make([]*PredicatedSchedtagResource, 0) for _, res := range ress { ps, err := p.check(input, res, u, c) if err != nil { // append err, resource not suit input customer errs = append(errs, err) continue } ret = append(ret, ps) } if len(ret) == 0 { return nil, errors.NewAggregate(errs) } return ret, nil } func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap { ret, ok := p.CandidateInputResources.Load(candidateId) if !ok { ret = make(map[int][]*PredicatedSchedtagResource) p.CandidateInputResources.Store(candidateId, ret) } return ret.(map[int][]*PredicatedSchedtagResource) } func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) { input := sp.GetInputs(u) if len(input) == 0 { return false, nil } p.Hypervisor = u.GetHypervisor() // always do select step u.AppendSelectPlugin(sp) return true, nil } func (p *BaseSchedtagPredicate) Execute( sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, ) (bool, []core.PredicateFailureReason, error) { inputs := sp.GetInputs(u) resources := sp.GetResources(c) h := NewPredicateHelper(sp, u, c) inputRes := p.GetInputResourcesMap(c.IndexKey()) filterErrs := make([]core.PredicateFailureReason, 0) for idx, input := range inputs { fitResources := make([]ISchedtagCandidateResource, 0) errs := make([]core.PredicateFailureReason, 0) matchedRes := make([]ISchedtagCandidateResource, 0) for _, r := range resources { if sp.IsResourceMatchInput(input, r) { matchedRes = append(matchedRes, r) } } if len(matchedRes) == 0 { errs = append(errs, &FailReason{ Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()), Type: fmt.Sprintf("%s_match", input.ResourceKeyword()), }) } for _, res := range matchedRes { if err := sp.IsResourceFitInput(u, c, res, input); err == nil { fitResources = append(fitResources, res) } else { errs = append(errs, err) } } if len(fitResources) == 0 { h.ExcludeByErrors(errs) break } if len(errs) > 0 { filterErrs = append(filterErrs, errs...) } matchedResources, err := p.checkResources(input, fitResources, u, c) if err != nil { if len(filterErrs) > 0 { h.ExcludeByErrors(filterErrs) } errMsg := fmt.Sprintf("schedtag: %v", err.Error()) h.Exclude(errMsg) } inputRes[idx] = matchedResources } return h.GetResult() } func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) { resTags := []models.SSchedtag{} for _, res := range sp.GetResources(c) { resTags = append(resTags, res.GetSchedtags()...) 
} inputRes := p.GetInputResourcesMap(c.IndexKey()) avoidTags := inputRes.GetAvoidTags() preferTags := inputRes.GetPreferTags() avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid) preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer) setScore := SetCandidateScoreBySchedtag setScore(u, c, preferCountMap, true) setScore(u, c, avoidCountMap, false) } func (p *BaseSchedtagPredicate) OnSelectEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, count int64) { inputRes := p.GetInputResourcesMap(c.IndexKey()) output := u.GetAllocatedResource(c.IndexKey()) inputs := sp.GetInputs(u) for idx, res := range inputRes { selRes := p.selectResource(sp, c, inputs[idx], res) sortRes := newSortCandidateResource(sp, selRes) sort.Sort(sortRes) //log.Debugf("sort result: %s", sortRes.DebugString()) sp.AddSelectResult(idx, inputs[idx], sortRes.res, output) } } type sortCandidateResource struct { predicate ISchedtagPredicateInstance res []ISchedtagCandidateResource } func newSortCandidateResource(predicate ISchedtagPredicateInstance, res []ISchedtagCandidateResource) *sortCandidateResource { return &sortCandidateResource{ predicate: predicate, res: res, } } func (s *sortCandidateResource) Len() int { return len(s.res) } func (s *sortCandidateResource) DebugString() string { var debugStr string for _, i := range s.res { debugStr = fmt.Sprintf("%s %d", debugStr, s.predicate.GetCandidateResourceSortScore(i)) } return debugStr } // desc order func (s *sortCandidateResource) Less(i, j int) bool { res1, res2 := s.res[i], s.res[j] v1 := s.predicate.GetCandidateResourceSortScore(res1) v2 := s.predicate.GetCandidateResourceSortScore(res2) return v1 > v2 } func (s *sortCandidateResource) Swap(i, j int) { s.res[i], s.res[j] = s.res[j], s.res[i] } func (p *BaseSchedtagPredicate) selectResource( sp ISchedtagPredicateInstance, c core.Candidater, input ISchedtagCustomer, ress []*PredicatedSchedtagResource, ) []ISchedtagCandidateResource { preferRes := make([]ISchedtagCandidateResource, 0) noTagRes := make([]ISchedtagCandidateResource, 0) avoidRes := make([]ISchedtagCandidateResource, 0) for _, res := range ress { if res.isNoTag() { noTagRes = append(noTagRes, res.ISchedtagCandidateResource) } else if res.hasPreferTags() { preferRes = append(preferRes, res.ISchedtagCandidateResource) } else if res.hasAvoidTags() { avoidRes = append(avoidRes, res.ISchedtagCandidateResource) } } for _, ress := range [][]ISchedtagCandidateResource{ preferRes, noTagRes, avoidRes, } { if len(ress) == 0 { continue } if ret := sp.DoSelect(c, input, ress); ret != nil { return ret } } return nil }
{ return m.getAllTags(true) }
identifier_body
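The body filled in here, GetPreferTags, just flattens the matched prefer tags via getAllTags; downstream, selectResource buckets candidates into prefer, untagged, and avoid groups and tries them in that order. A simplified sketch of that bucketing, with resource as an assumed stand-in for PredicatedSchedtagResource:

```go
package main

import "fmt"

// resource is a stand-in for PredicatedSchedtagResource: a candidate plus the
// prefer/avoid schedtags that matched it.
type resource struct {
	Name   string
	Prefer []string
	Avoid  []string
}

// partition mirrors selectResource's bucketing: preferred candidates are tried
// first, untagged candidates next, and avoided candidates only as a last resort.
func partition(all []resource) (prefer, noTag, avoid []resource) {
	for _, r := range all {
		switch {
		case len(r.Prefer) > 0:
			prefer = append(prefer, r)
		case len(r.Avoid) > 0:
			avoid = append(avoid, r)
		default:
			noTag = append(noTag, r)
		}
	}
	return
}

func main() {
	p, n, a := partition([]resource{
		{Name: "host1", Prefer: []string{"ssd"}},
		{Name: "host2"},
		{Name: "host3", Avoid: []string{"spinning"}},
	})
	fmt.Println(len(p), len(n), len(a)) // 1 1 1
}
```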
predicates.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package predicates import ( "fmt" "sort" "strings" "sync" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/util/errors" computeapi "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/compute/models" "yunion.io/x/onecloud/pkg/scheduler/algorithm/plugin" "yunion.io/x/onecloud/pkg/scheduler/api" "yunion.io/x/onecloud/pkg/scheduler/core" ) // BasePredicate is a default struct for all the predicates that will // include it and implement its Name() and PreExecute() methods. type BasePredicate struct{} func (b *BasePredicate) Name() string { return "base_predicate_should_not_be_called" } func (b *BasePredicate) PreExecute(unit *core.Unit, candis []core.Candidater) (bool, error) { return true, nil } func (b *BasePredicate) GetHypervisorDriver(u *core.Unit) models.IGuestDriver { return models.GetDriver(u.GetHypervisor()) } type PredicateHelper struct { predicate core.FitPredicate predicateFails []core.PredicateFailureReason capacity int64 Unit *core.Unit Candidate core.Candidater } func (h *PredicateHelper) getResult() (bool, []core.PredicateFailureReason, error) { if len(h.predicateFails) > 0 { return false, h.predicateFails, nil } if h.capacity == 0 { return false, []core.PredicateFailureReason{}, nil } return true, nil, nil } func (h *PredicateHelper) GetResult() (bool, []core.PredicateFailureReason, error) { ok, reasons, err := h.getResult() if !ok { log.Warningf("[Filter Result] candidate: %q, filter: %q, is_ok: %v, reason: %q, error: %v\n", h.Candidate.IndexKey(), h.predicate.Name(), ok, getReasonsString(reasons), err) } return ok, reasons, err } func getReasonsString(reasons []core.PredicateFailureReason) string { if len(reasons) == 0 { return "" } ss := make([]string, 0, len(reasons)) for _, reason := range reasons { ss = append(ss, reason.GetReason()) } return strings.Join(ss, ", ") } func NewPredicateHelper(pre core.FitPredicate, unit *core.Unit, candi core.Candidater) *PredicateHelper { h := &PredicateHelper{ predicate: pre, capacity: core.EmptyCapacity, predicateFails: []core.PredicateFailureReason{}, Unit: unit, Candidate: candi, } return h } func (h *PredicateHelper) GetFailedResult(err error) (bool, []core.PredicateFailureReason, error) { return false, nil, err } func (h *PredicateHelper) AppendPredicateFail(reason core.PredicateFailureReason) { h.predicateFails = append(h.predicateFails, reason) } type predicateFailure struct { err 
core.PredicateFailureError eType string } func (f predicateFailure) GetReason() string { return f.err.GetReason() } func (f predicateFailure) GetType() string { return f.eType } func (h *PredicateHelper) AppendPredicateFailMsg(reason string) { h.AppendPredicateFailMsgWithType(reason, h.predicate.Name()) } func (h *PredicateHelper) AppendPredicateFailMsgWithType(reason string, eType string) { err := NewUnexceptedResourceError(reason) h.AppendPredicateFail(&predicateFailure{err: err, eType: eType}) } func (h *PredicateHelper) AppendInsufficientResourceError(req, total, free int64) { h.AppendPredicateFail( &predicateFailure{ err: NewInsufficientResourceError(h.Candidate.Getter().Name(), req, total, free), eType: h.predicate.Name(), }) } // SetCapacity sets the resource capacity calculated by a filter. // The 'capacity' default is -1 (core.EmptyCapacity). func (h *PredicateHelper) SetCapacity(capacity int64) { if capacity < 0 { capacity = 0 } h.SetCapacityCounter(core.NewNormalCounter(capacity)) } func (h *PredicateHelper) SetCapacityCounter(counter core.Counter) { capacity := counter.GetCount() if capacity < core.EmptyCapacity { capacity = core.EmptyCapacity } h.capacity = capacity h.Unit.SetCapacity(h.Candidate.IndexKey(), h.predicate.Name(), counter) } func (h *PredicateHelper) SetSelectPriority(sp int) { if sp < 0 { sp = 0 } h.Unit.SetSelectPriorityWithLock(h.Candidate.IndexKey(), h.predicate.Name(), core.SSelectPriorityValue(sp)) } func (h *PredicateHelper) Exclude(reason string) { h.SetCapacity(0) h.AppendPredicateFailMsg(reason) } func (h *PredicateHelper) ExcludeByErrors(errs []core.PredicateFailureReason) { h.SetCapacity(0) for _, err := range errs { h.AppendPredicateFail(err) } } func (h *PredicateHelper) Exclude2(predicateName string, current, expected interface{}) { h.Exclude(fmt.Sprintf("%s is '%v', expected '%v'", predicateName, current, expected)) } // UseReserved checks whether the unit can use guest reserved resources func (h *PredicateHelper) UseReserved() bool { usable := false data := h.Unit.SchedData() isoDevs := data.IsolatedDevices if len(isoDevs) > 0 { usable = true } return usable } type PredicatedSchedtagResource struct { ISchedtagCandidateResource PreferTags []computeapi.SchedtagConfig AvoidTags []computeapi.SchedtagConfig } type SchedtagInputResourcesMap map[int][]*PredicatedSchedtagResource func (m SchedtagInputResourcesMap) getAllTags(isPrefer bool) []computeapi.SchedtagConfig { ret := make([]computeapi.SchedtagConfig, 0) for _, ss := range m { for _, s := range ss { var tags []computeapi.SchedtagConfig if isPrefer { tags = s.PreferTags } else { tags = s.AvoidTags } ret = append(ret, tags...)
} } return ret } func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig { return m.getAllTags(true) } func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig { return m.getAllTags(false) } type CandidateInputResourcesMap struct { *sync.Map // map[string]SchedtagInputResourcesMap } type ISchedtagCandidateResource interface { GetName() string GetId() string Keyword() string GetSchedtags() []models.SSchedtag GetSchedtagJointManager() models.ISchedtagJointManager GetDynamicConditionInput() *jsonutils.JSONDict } type ISchedtagPredicateInstance interface { core.FitPredicate OnPriorityEnd(u *core.Unit, c core.Candidater) OnSelectEnd(u *core.Unit, c core.Candidater, count int64) GetInputs(u *core.Unit) []ISchedtagCustomer GetResources(c core.Candidater) []ISchedtagCandidateResource IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource) GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64 } type BaseSchedtagPredicate struct { BasePredicate plugin.BasePlugin CandidateInputResources *CandidateInputResourcesMap Hypervisor string } func NewBaseSchedtagPredicate() *BaseSchedtagPredicate { return &BaseSchedtagPredicate{ CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap), } } func (p *PredicatedSchedtagResource) isNoTag() bool { return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0 } func (p *PredicatedSchedtagResource) hasPreferTags() bool { return len(p.PreferTags) != 0 } func (p *PredicatedSchedtagResource) hasAvoidTags() bool { return len(p.AvoidTags) != 0 } type ISchedtagCustomer interface { JSON(interface{}) *jsonutils.JSONDict Keyword() string IsSpecifyResource() bool GetSchedtags() []*computeapi.SchedtagConfig ResourceKeyword() string } type SchedtagResourceW struct { candidater ISchedtagCandidateResource input ISchedtagCustomer } func (w SchedtagResourceW) IndexKey() string { return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId()) } func (w SchedtagResourceW) ResourceType() string { return getSchedtagResourceType(w.candidater) } func getSchedtagResourceType(candidater ISchedtagCandidateResource) string { return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural() } func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag { return w.candidater.GetSchedtags() } func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict { ret := jsonutils.NewDict() resSchedDesc := w.candidater.GetDynamicConditionInput() inputSchedDesc := w.input.JSON(w.input) ret.Add(resSchedDesc, w.candidater.Keyword()) ret.Add(inputSchedDesc, w.input.Keyword()) return ret } func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver { return models.GetDriver(p.Hypervisor) } func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) { allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate)) if err != nil { return nil, err } tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags) shouldExec := 
u.ShouldExecuteSchedtagFilter(c.Getter().Id()) res := &PredicatedSchedtagResource{ ISchedtagCandidateResource: candidate, } if shouldExec && !input.IsSpecifyResource() { if err := tagPredicate.Check( SchedtagResourceW{ candidater: candidate, input: input, }, ); err != nil { return nil, err } res.PreferTags = tagPredicate.GetPreferTags() res.AvoidTags = tagPredicate.GetAvoidTags() } return res, nil } func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) { errs := make([]error, 0) ret := make([]*PredicatedSchedtagResource, 0) for _, res := range ress { ps, err := p.check(input, res, u, c) if err != nil { // append err, resource not suit input customer errs = append(errs, err) continue } ret = append(ret, ps) } if len(ret) == 0 { return nil, errors.NewAggregate(errs) } return ret, nil } func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap { ret, ok := p.CandidateInputResources.Load(candidateId) if !ok { ret = make(map[int][]*PredicatedSchedtagResource) p.CandidateInputResources.Store(candidateId, ret) } return ret.(map[int][]*PredicatedSchedtagResource) } func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) { input := sp.GetInputs(u) if len(input) == 0 { return false, nil } p.Hypervisor = u.GetHypervisor() // always do select step u.AppendSelectPlugin(sp) return true, nil } func (p *BaseSchedtagPredicate) Execute( sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, ) (bool, []core.PredicateFailureReason, error) { inputs := sp.GetInputs(u) resources := sp.GetResources(c) h := NewPredicateHelper(sp, u, c) inputRes := p.GetInputResourcesMap(c.IndexKey()) filterErrs := make([]core.PredicateFailureReason, 0) for idx, input := range inputs { fitResources := make([]ISchedtagCandidateResource, 0) errs := make([]core.PredicateFailureReason, 0) matchedRes := make([]ISchedtagCandidateResource, 0) for _, r := range resources { if sp.IsResourceMatchInput(input, r) { matchedRes = append(matchedRes, r) } } if len(matchedRes) == 0 { errs = append(errs, &FailReason{ Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()), Type: fmt.Sprintf("%s_match", input.ResourceKeyword()), }) } for _, res := range matchedRes { if err := sp.IsResourceFitInput(u, c, res, input); err == nil { fitResources = append(fitResources, res) } else { errs = append(errs, err) } } if len(fitResources) == 0 { h.ExcludeByErrors(errs) break } if len(errs) > 0 { filterErrs = append(filterErrs, errs...) } matchedResources, err := p.checkResources(input, fitResources, u, c) if err != nil { if len(filterErrs) > 0 { h.ExcludeByErrors(filterErrs) } errMsg := fmt.Sprintf("schedtag: %v", err.Error()) h.Exclude(errMsg) } inputRes[idx] = matchedResources } return h.GetResult() } func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) { resTags := []models.SSchedtag{} for _, res := range sp.GetResources(c) { resTags = append(resTags, res.GetSchedtags()...) 
} inputRes := p.GetInputResourcesMap(c.IndexKey()) avoidTags := inputRes.GetAvoidTags() preferTags := inputRes.GetPreferTags() avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid) preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer) setScore := SetCandidateScoreBySchedtag setScore(u, c, preferCountMap, true) setScore(u, c, avoidCountMap, false) } func (p *BaseSchedtagPredicate)
(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, count int64) { inputRes := p.GetInputResourcesMap(c.IndexKey()) output := u.GetAllocatedResource(c.IndexKey()) inputs := sp.GetInputs(u) for idx, res := range inputRes { selRes := p.selectResource(sp, c, inputs[idx], res) sortRes := newSortCandidateResource(sp, selRes) sort.Sort(sortRes) //log.Debugf("sort result: %s", sortRes.DebugString()) sp.AddSelectResult(idx, inputs[idx], sortRes.res, output) } } type sortCandidateResource struct { predicate ISchedtagPredicateInstance res []ISchedtagCandidateResource } func newSortCandidateResource(predicate ISchedtagPredicateInstance, res []ISchedtagCandidateResource) *sortCandidateResource { return &sortCandidateResource{ predicate: predicate, res: res, } } func (s *sortCandidateResource) Len() int { return len(s.res) } func (s *sortCandidateResource) DebugString() string { var debugStr string for _, i := range s.res { debugStr = fmt.Sprintf("%s %d", debugStr, s.predicate.GetCandidateResourceSortScore(i)) } return debugStr } // desc order func (s *sortCandidateResource) Less(i, j int) bool { res1, res2 := s.res[i], s.res[j] v1 := s.predicate.GetCandidateResourceSortScore(res1) v2 := s.predicate.GetCandidateResourceSortScore(res2) return v1 > v2 } func (s *sortCandidateResource) Swap(i, j int) { s.res[i], s.res[j] = s.res[j], s.res[i] } func (p *BaseSchedtagPredicate) selectResource( sp ISchedtagPredicateInstance, c core.Candidater, input ISchedtagCustomer, ress []*PredicatedSchedtagResource, ) []ISchedtagCandidateResource { preferRes := make([]ISchedtagCandidateResource, 0) noTagRes := make([]ISchedtagCandidateResource, 0) avoidRes := make([]ISchedtagCandidateResource, 0) for _, res := range ress { if res.isNoTag() { noTagRes = append(noTagRes, res.ISchedtagCandidateResource) } else if res.hasPreferTags() { preferRes = append(preferRes, res.ISchedtagCandidateResource) } else if res.hasAvoidTags() { avoidRes = append(avoidRes, res.ISchedtagCandidateResource) } } for _, ress := range [][]ISchedtagCandidateResource{ preferRes, noTagRes, avoidRes, } { if len(ress) == 0 { continue } if ret := sp.DoSelect(c, input, ress); ret != nil { return ret } } return nil }
OnSelectEnd
identifier_name
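For orientation between records: the scaffolding above (BasePredicate plus PredicateHelper) is what concrete filters build on. Below is a minimal, hypothetical predicate wired through that helper; the predicate name and the resource numbers are stubs rather than values read from u.SchedData() or the candidate getter, so this is a sketch of the SetCapacity / AppendInsufficientResourceError / GetResult flow, not code from the repository:

```go
package predicates

import (
	"yunion.io/x/onecloud/pkg/scheduler/core"
)

// memoryPredicate is a hypothetical filter used only to illustrate how
// the PredicateHelper defined in this file is typically driven.
type memoryPredicate struct {
	BasePredicate
}

func (p *memoryPredicate) Name() string { return "demo_memory" }

func (p *memoryPredicate) Execute(u *core.Unit, c core.Candidater) (bool, []core.PredicateFailureReason, error) {
	h := NewPredicateHelper(p, u, c)
	// Stubbed quantities (MB); a real predicate would read the request
	// from u.SchedData() and free/total from the candidate's getter.
	var req, total, free int64 = 4096, 65536, 2048
	if free < req {
		// Record a failure reason and zero the capacity so GetResult
		// reports the candidate as filtered out.
		h.AppendInsufficientResourceError(req, total, free)
		h.SetCapacity(0)
	} else {
		// Capacity doubles as "how many more such guests fit here".
		h.SetCapacity(free / req)
	}
	return h.GetResult()
}
```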
predicates.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package predicates import ( "fmt" "sort" "strings" "sync" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/util/errors" computeapi "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/compute/models" "yunion.io/x/onecloud/pkg/scheduler/algorithm/plugin" "yunion.io/x/onecloud/pkg/scheduler/api" "yunion.io/x/onecloud/pkg/scheduler/core" ) // BasePredicate is a default struct for all the predicates that will // include it and implement its Name() and PreExecute() methods. type BasePredicate struct{} func (b *BasePredicate) Name() string { return "base_predicate_should_not_be_called" } func (b *BasePredicate) PreExecute(unit *core.Unit, candis []core.Candidater) (bool, error) { return true, nil } func (b *BasePredicate) GetHypervisorDriver(u *core.Unit) models.IGuestDriver { return models.GetDriver(u.GetHypervisor()) } type PredicateHelper struct { predicate core.FitPredicate predicateFails []core.PredicateFailureReason capacity int64 Unit *core.Unit Candidate core.Candidater } func (h *PredicateHelper) getResult() (bool, []core.PredicateFailureReason, error) { if len(h.predicateFails) > 0 { return false, h.predicateFails, nil } if h.capacity == 0 { return false, []core.PredicateFailureReason{}, nil } return true, nil, nil } func (h *PredicateHelper) GetResult() (bool, []core.PredicateFailureReason, error) { ok, reasons, err := h.getResult() if !ok { log.Warningf("[Filter Result] candidate: %q, filter: %q, is_ok: %v, reason: %q, error: %v\n", h.Candidate.IndexKey(), h.predicate.Name(), ok, getReasonsString(reasons), err) } return ok, reasons, err } func getReasonsString(reasons []core.PredicateFailureReason) string { if len(reasons) == 0 { return "" } ss := make([]string, 0, len(reasons)) for _, reason := range reasons { ss = append(ss, reason.GetReason()) } return strings.Join(ss, ", ") } func NewPredicateHelper(pre core.FitPredicate, unit *core.Unit, candi core.Candidater) *PredicateHelper { h := &PredicateHelper{ predicate: pre, capacity: core.EmptyCapacity, predicateFails: []core.PredicateFailureReason{}, Unit: unit, Candidate: candi, } return h } func (h *PredicateHelper) GetFailedResult(err error) (bool, []core.PredicateFailureReason, error) { return false, nil, err } func (h *PredicateHelper) AppendPredicateFail(reason core.PredicateFailureReason) { h.predicateFails = append(h.predicateFails, reason) } type predicateFailure struct { err
core.PredicateFailureError eType string } func (f predicateFailure) GetReason() string { return f.err.GetReason() } func (f predicateFailure) GetType() string { return f.eType } func (h *PredicateHelper) AppendPredicateFailMsg(reason string) { h.AppendPredicateFailMsgWithType(reason, h.predicate.Name()) } func (h *PredicateHelper) AppendPredicateFailMsgWithType(reason string, eType string) { err := NewUnexceptedResourceError(reason) h.AppendPredicateFail(&predicateFailure{err: err, eType: eType}) } func (h *PredicateHelper) AppendInsufficientResourceError(req, total, free int64) { h.AppendPredicateFail( &predicateFailure{ err: NewInsufficientResourceError(h.Candidate.Getter().Name(), req, total, free), eType: h.predicate.Name(), }) } // SetCapacity sets the resource capacity calculated by a filter. // The 'capacity' default is -1 (core.EmptyCapacity). func (h *PredicateHelper) SetCapacity(capacity int64) { if capacity < 0 { capacity = 0 } h.SetCapacityCounter(core.NewNormalCounter(capacity)) } func (h *PredicateHelper) SetCapacityCounter(counter core.Counter) { capacity := counter.GetCount() if capacity < core.EmptyCapacity { capacity = core.EmptyCapacity } h.capacity = capacity h.Unit.SetCapacity(h.Candidate.IndexKey(), h.predicate.Name(), counter) } func (h *PredicateHelper) SetSelectPriority(sp int) { if sp < 0 { sp = 0 } h.Unit.SetSelectPriorityWithLock(h.Candidate.IndexKey(), h.predicate.Name(), core.SSelectPriorityValue(sp)) } func (h *PredicateHelper) Exclude(reason string) { h.SetCapacity(0) h.AppendPredicateFailMsg(reason) } func (h *PredicateHelper) ExcludeByErrors(errs []core.PredicateFailureReason) { h.SetCapacity(0) for _, err := range errs { h.AppendPredicateFail(err) } } func (h *PredicateHelper) Exclude2(predicateName string, current, expected interface{}) { h.Exclude(fmt.Sprintf("%s is '%v', expected '%v'", predicateName, current, expected)) } // UseReserved checks whether the unit can use guest reserved resources func (h *PredicateHelper) UseReserved() bool { usable := false data := h.Unit.SchedData() isoDevs := data.IsolatedDevices if len(isoDevs) > 0 { usable = true } return usable } type PredicatedSchedtagResource struct { ISchedtagCandidateResource PreferTags []computeapi.SchedtagConfig AvoidTags []computeapi.SchedtagConfig } type SchedtagInputResourcesMap map[int][]*PredicatedSchedtagResource func (m SchedtagInputResourcesMap) getAllTags(isPrefer bool) []computeapi.SchedtagConfig { ret := make([]computeapi.SchedtagConfig, 0) for _, ss := range m { for _, s := range ss { var tags []computeapi.SchedtagConfig if isPrefer { tags = s.PreferTags } else { tags = s.AvoidTags } ret = append(ret, tags...)
} } return ret } func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig { return m.getAllTags(true) } func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig { return m.getAllTags(false) } type CandidateInputResourcesMap struct { *sync.Map // map[string]SchedtagInputResourcesMap } type ISchedtagCandidateResource interface { GetName() string GetId() string Keyword() string GetSchedtags() []models.SSchedtag GetSchedtagJointManager() models.ISchedtagJointManager GetDynamicConditionInput() *jsonutils.JSONDict } type ISchedtagPredicateInstance interface { core.FitPredicate OnPriorityEnd(u *core.Unit, c core.Candidater) OnSelectEnd(u *core.Unit, c core.Candidater, count int64) GetInputs(u *core.Unit) []ISchedtagCustomer GetResources(c core.Candidater) []ISchedtagCandidateResource IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource) GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64 } type BaseSchedtagPredicate struct { BasePredicate plugin.BasePlugin CandidateInputResources *CandidateInputResourcesMap Hypervisor string } func NewBaseSchedtagPredicate() *BaseSchedtagPredicate { return &BaseSchedtagPredicate{ CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap), } } func (p *PredicatedSchedtagResource) isNoTag() bool { return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0 } func (p *PredicatedSchedtagResource) hasPreferTags() bool { return len(p.PreferTags) != 0 } func (p *PredicatedSchedtagResource) hasAvoidTags() bool { return len(p.AvoidTags) != 0 } type ISchedtagCustomer interface { JSON(interface{}) *jsonutils.JSONDict Keyword() string IsSpecifyResource() bool GetSchedtags() []*computeapi.SchedtagConfig ResourceKeyword() string } type SchedtagResourceW struct { candidater ISchedtagCandidateResource input ISchedtagCustomer } func (w SchedtagResourceW) IndexKey() string { return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId()) } func (w SchedtagResourceW) ResourceType() string { return getSchedtagResourceType(w.candidater) } func getSchedtagResourceType(candidater ISchedtagCandidateResource) string { return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural() } func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag { return w.candidater.GetSchedtags() } func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict { ret := jsonutils.NewDict() resSchedDesc := w.candidater.GetDynamicConditionInput() inputSchedDesc := w.input.JSON(w.input) ret.Add(resSchedDesc, w.candidater.Keyword()) ret.Add(inputSchedDesc, w.input.Keyword()) return ret } func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver { return models.GetDriver(p.Hypervisor) } func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) { allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate)) if err != nil { return nil, err } tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags) shouldExec := 
u.ShouldExecuteSchedtagFilter(c.Getter().Id()) res := &PredicatedSchedtagResource{ ISchedtagCandidateResource: candidate, } if shouldExec && !input.IsSpecifyResource() { if err := tagPredicate.Check( SchedtagResourceW{ candidater: candidate, input: input, }, ); err != nil { return nil, err } res.PreferTags = tagPredicate.GetPreferTags() res.AvoidTags = tagPredicate.GetAvoidTags() } return res, nil } func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) { errs := make([]error, 0) ret := make([]*PredicatedSchedtagResource, 0) for _, res := range ress { ps, err := p.check(input, res, u, c) if err != nil { // append err, resource not suit input customer errs = append(errs, err) continue } ret = append(ret, ps) } if len(ret) == 0 { return nil, errors.NewAggregate(errs) } return ret, nil } func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap { ret, ok := p.CandidateInputResources.Load(candidateId) if !ok { ret = make(map[int][]*PredicatedSchedtagResource) p.CandidateInputResources.Store(candidateId, ret) } return ret.(map[int][]*PredicatedSchedtagResource) } func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) { input := sp.GetInputs(u) if len(input) == 0 { return false, nil } p.Hypervisor = u.GetHypervisor() // always do select step u.AppendSelectPlugin(sp) return true, nil } func (p *BaseSchedtagPredicate) Execute( sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, ) (bool, []core.PredicateFailureReason, error) { inputs := sp.GetInputs(u) resources := sp.GetResources(c) h := NewPredicateHelper(sp, u, c) inputRes := p.GetInputResourcesMap(c.IndexKey()) filterErrs := make([]core.PredicateFailureReason, 0) for idx, input := range inputs { fitResources := make([]ISchedtagCandidateResource, 0) errs := make([]core.PredicateFailureReason, 0) matchedRes := make([]ISchedtagCandidateResource, 0) for _, r := range resources { if sp.IsResourceMatchInput(input, r) { matchedRes = append(matchedRes, r) } } if len(matchedRes) == 0 { errs = append(errs, &FailReason{ Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()), Type: fmt.Sprintf("%s_match", input.ResourceKeyword()), }) } for _, res := range matchedRes { if err := sp.IsResourceFitInput(u, c, res, input); err == nil { fitResources = append(fitResources, res) } else { errs = append(errs, err) } } if len(fitResources) == 0 { h.ExcludeByErrors(errs) break } if len(errs) > 0 { filterErrs = append(filterErrs, errs...) } matchedResources, err := p.checkResources(input, fitResources, u, c) if err != nil { if len(filterErrs) > 0 { h.ExcludeByErrors(filterErrs) } errMsg := fmt.Sprintf("schedtag: %v", err.Error()) h.Exclude(errMsg) } inputRes[idx] = matchedResources } return h.GetResult() } func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) { resTags := []models.SSchedtag{} for _, res := range sp.GetResources(c)
inputRes := p.GetInputResourcesMap(c.IndexKey()) avoidTags := inputRes.GetAvoidTags() preferTags := inputRes.GetPreferTags() avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid) preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer) setScore := SetCandidateScoreBySchedtag setScore(u, c, preferCountMap, true) setScore(u, c, avoidCountMap, false) } func (p *BaseSchedtagPredicate) OnSelectEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, count int64) { inputRes := p.GetInputResourcesMap(c.IndexKey()) output := u.GetAllocatedResource(c.IndexKey()) inputs := sp.GetInputs(u) for idx, res := range inputRes { selRes := p.selectResource(sp, c, inputs[idx], res) sortRes := newSortCandidateResource(sp, selRes) sort.Sort(sortRes) //log.Debugf("sort result: %s", sortRes.DebugString()) sp.AddSelectResult(idx, inputs[idx], sortRes.res, output) } } type sortCandidateResource struct { predicate ISchedtagPredicateInstance res []ISchedtagCandidateResource } func newSortCandidateResource(predicate ISchedtagPredicateInstance, res []ISchedtagCandidateResource) *sortCandidateResource { return &sortCandidateResource{ predicate: predicate, res: res, } } func (s *sortCandidateResource) Len() int { return len(s.res) } func (s *sortCandidateResource) DebugString() string { var debugStr string for _, i := range s.res { debugStr = fmt.Sprintf("%s %d", debugStr, s.predicate.GetCandidateResourceSortScore(i)) } return debugStr } // desc order func (s *sortCandidateResource) Less(i, j int) bool { res1, res2 := s.res[i], s.res[j] v1 := s.predicate.GetCandidateResourceSortScore(res1) v2 := s.predicate.GetCandidateResourceSortScore(res2) return v1 > v2 } func (s *sortCandidateResource) Swap(i, j int) { s.res[i], s.res[j] = s.res[j], s.res[i] } func (p *BaseSchedtagPredicate) selectResource( sp ISchedtagPredicateInstance, c core.Candidater, input ISchedtagCustomer, ress []*PredicatedSchedtagResource, ) []ISchedtagCandidateResource { preferRes := make([]ISchedtagCandidateResource, 0) noTagRes := make([]ISchedtagCandidateResource, 0) avoidRes := make([]ISchedtagCandidateResource, 0) for _, res := range ress { if res.isNoTag() { noTagRes = append(noTagRes, res.ISchedtagCandidateResource) } else if res.hasPreferTags() { preferRes = append(preferRes, res.ISchedtagCandidateResource) } else if res.hasAvoidTags() { avoidRes = append(avoidRes, res.ISchedtagCandidateResource) } } for _, ress := range [][]ISchedtagCandidateResource{ preferRes, noTagRes, avoidRes, } { if len(ress) == 0 { continue } if ret := sp.DoSelect(c, input, ress); ret != nil { return ret } } return nil }
{ resTags = append(resTags, res.GetSchedtags()...) }
conditional_block
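A note on sortCandidateResource in the record above: it implements sort.Interface with a greater-than comparison in Less, which is how sort.Sort is coaxed into a descending, highest-score-first order. A self-contained sketch of the same trick, using plain demo data in place of the scheduler types:

```go
package main

import (
	"fmt"
	"sort"
)

type byScoreDesc struct {
	names  []string
	scores map[string]int64
}

func (s byScoreDesc) Len() int      { return len(s.names) }
func (s byScoreDesc) Swap(i, j int) { s.names[i], s.names[j] = s.names[j], s.names[i] }
func (s byScoreDesc) Less(i, j int) bool {
	// ">" instead of "<" yields a descending order, mirroring
	// sortCandidateResource.Less in the record above.
	return s.scores[s.names[i]] > s.scores[s.names[j]]
}

func main() {
	s := byScoreDesc{
		names:  []string{"host1", "host2", "host3"},
		scores: map[string]int64{"host1": 10, "host2": 30, "host3": 20},
	}
	sort.Sort(s)
	fmt.Println(s.names) // [host2 host3 host1]
}
```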
predicates.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package predicates import ( "fmt" "sort" "strings" "sync" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/util/errors" computeapi "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/compute/models" "yunion.io/x/onecloud/pkg/scheduler/algorithm/plugin" "yunion.io/x/onecloud/pkg/scheduler/api" "yunion.io/x/onecloud/pkg/scheduler/core" ) // BasePredicate is a default struct for all the predicates that will // include it and implement its Name() and PreExecute() methods. type BasePredicate struct{} func (b *BasePredicate) Name() string { return "base_predicate_should_not_be_called" } func (b *BasePredicate) PreExecute(unit *core.Unit, candis []core.Candidater) (bool, error) { return true, nil } func (b *BasePredicate) GetHypervisorDriver(u *core.Unit) models.IGuestDriver { return models.GetDriver(u.GetHypervisor()) } type PredicateHelper struct { predicate core.FitPredicate predicateFails []core.PredicateFailureReason capacity int64 Unit *core.Unit Candidate core.Candidater } func (h *PredicateHelper) getResult() (bool, []core.PredicateFailureReason, error) { if len(h.predicateFails) > 0 { return false, h.predicateFails, nil } if h.capacity == 0 { return false, []core.PredicateFailureReason{}, nil } return true, nil, nil } func (h *PredicateHelper) GetResult() (bool, []core.PredicateFailureReason, error) { ok, reasons, err := h.getResult() if !ok { log.Warningf("[Filter Result] candidate: %q, filter: %q, is_ok: %v, reason: %q, error: %v\n", h.Candidate.IndexKey(), h.predicate.Name(), ok, getReasonsString(reasons), err) } return ok, reasons, err } func getReasonsString(reasons []core.PredicateFailureReason) string { if len(reasons) == 0 { return "" } ss := make([]string, 0, len(reasons)) for _, reason := range reasons { ss = append(ss, reason.GetReason()) } return strings.Join(ss, ", ") } func NewPredicateHelper(pre core.FitPredicate, unit *core.Unit, candi core.Candidater) *PredicateHelper { h := &PredicateHelper{ predicate: pre, capacity: core.EmptyCapacity, predicateFails: []core.PredicateFailureReason{}, Unit: unit, Candidate: candi, } return h } func (h *PredicateHelper) GetFailedResult(err error) (bool, []core.PredicateFailureReason, error) { return false, nil, err } func (h *PredicateHelper) AppendPredicateFail(reason core.PredicateFailureReason) { h.predicateFails = append(h.predicateFails, reason) } type predicateFailure struct { err
core.PredicateFailureError eType string } func (f predicateFailure) GetReason() string { return f.err.GetReason() } func (f predicateFailure) GetType() string { return f.eType } func (h *PredicateHelper) AppendPredicateFailMsg(reason string) { h.AppendPredicateFailMsgWithType(reason, h.predicate.Name()) } func (h *PredicateHelper) AppendPredicateFailMsgWithType(reason string, eType string) { err := NewUnexceptedResourceError(reason) h.AppendPredicateFail(&predicateFailure{err: err, eType: eType}) } func (h *PredicateHelper) AppendInsufficientResourceError(req, total, free int64) { h.AppendPredicateFail( &predicateFailure{ err: NewInsufficientResourceError(h.Candidate.Getter().Name(), req, total, free), eType: h.predicate.Name(), }) } // SetCapacity sets the resource capacity calculated by a filter. // The 'capacity' default is -1 (core.EmptyCapacity). func (h *PredicateHelper) SetCapacity(capacity int64) { if capacity < 0 { capacity = 0 } h.SetCapacityCounter(core.NewNormalCounter(capacity)) } func (h *PredicateHelper) SetCapacityCounter(counter core.Counter) { capacity := counter.GetCount() if capacity < core.EmptyCapacity { capacity = core.EmptyCapacity } h.capacity = capacity h.Unit.SetCapacity(h.Candidate.IndexKey(), h.predicate.Name(), counter) } func (h *PredicateHelper) SetSelectPriority(sp int) { if sp < 0 { sp = 0 } h.Unit.SetSelectPriorityWithLock(h.Candidate.IndexKey(), h.predicate.Name(), core.SSelectPriorityValue(sp)) } func (h *PredicateHelper) Exclude(reason string) { h.SetCapacity(0) h.AppendPredicateFailMsg(reason) } func (h *PredicateHelper) ExcludeByErrors(errs []core.PredicateFailureReason) { h.SetCapacity(0) for _, err := range errs { h.AppendPredicateFail(err) } } func (h *PredicateHelper) Exclude2(predicateName string, current, expected interface{}) { h.Exclude(fmt.Sprintf("%s is '%v', expected '%v'", predicateName, current, expected)) } // UseReserved checks whether the unit can use guest reserved resources func (h *PredicateHelper) UseReserved() bool { usable := false data := h.Unit.SchedData() isoDevs := data.IsolatedDevices if len(isoDevs) > 0 { usable = true } return usable } type PredicatedSchedtagResource struct { ISchedtagCandidateResource PreferTags []computeapi.SchedtagConfig AvoidTags []computeapi.SchedtagConfig } type SchedtagInputResourcesMap map[int][]*PredicatedSchedtagResource func (m SchedtagInputResourcesMap) getAllTags(isPrefer bool) []computeapi.SchedtagConfig { ret := make([]computeapi.SchedtagConfig, 0) for _, ss := range m { for _, s := range ss { var tags []computeapi.SchedtagConfig if isPrefer { tags = s.PreferTags } else { tags = s.AvoidTags } ret = append(ret, tags...)
} } return ret } func (m SchedtagInputResourcesMap) GetPreferTags() []computeapi.SchedtagConfig { return m.getAllTags(true) } func (m SchedtagInputResourcesMap) GetAvoidTags() []computeapi.SchedtagConfig { return m.getAllTags(false) } type CandidateInputResourcesMap struct { *sync.Map // map[string]SchedtagInputResourcesMap } type ISchedtagCandidateResource interface { GetName() string GetId() string Keyword() string GetSchedtags() []models.SSchedtag GetSchedtagJointManager() models.ISchedtagJointManager GetDynamicConditionInput() *jsonutils.JSONDict } type ISchedtagPredicateInstance interface { core.FitPredicate OnPriorityEnd(u *core.Unit, c core.Candidater) OnSelectEnd(u *core.Unit, c core.Candidater, count int64) GetInputs(u *core.Unit) []ISchedtagCustomer GetResources(c core.Candidater) []ISchedtagCandidateResource IsResourceMatchInput(input ISchedtagCustomer, res ISchedtagCandidateResource) bool IsResourceFitInput(unit *core.Unit, c core.Candidater, res ISchedtagCandidateResource, input ISchedtagCustomer) core.PredicateFailureReason DoSelect(c core.Candidater, input ISchedtagCustomer, res []ISchedtagCandidateResource) []ISchedtagCandidateResource AddSelectResult(index int, input ISchedtagCustomer, selectRes []ISchedtagCandidateResource, output *core.AllocatedResource) GetCandidateResourceSortScore(candidate ISchedtagCandidateResource) int64 } type BaseSchedtagPredicate struct { BasePredicate plugin.BasePlugin CandidateInputResources *CandidateInputResourcesMap Hypervisor string } func NewBaseSchedtagPredicate() *BaseSchedtagPredicate { return &BaseSchedtagPredicate{ CandidateInputResources: &CandidateInputResourcesMap{Map: new(sync.Map)}, // make(map[string]SchedtagInputResourcesMap), } } func (p *PredicatedSchedtagResource) isNoTag() bool { return len(p.PreferTags) == 0 && len(p.AvoidTags) == 0 } func (p *PredicatedSchedtagResource) hasPreferTags() bool { return len(p.PreferTags) != 0 } func (p *PredicatedSchedtagResource) hasAvoidTags() bool { return len(p.AvoidTags) != 0 } type ISchedtagCustomer interface { JSON(interface{}) *jsonutils.JSONDict Keyword() string IsSpecifyResource() bool GetSchedtags() []*computeapi.SchedtagConfig ResourceKeyword() string } type SchedtagResourceW struct { candidater ISchedtagCandidateResource input ISchedtagCustomer } func (w SchedtagResourceW) IndexKey() string { return fmt.Sprintf("%s:%s", w.candidater.GetName(), w.candidater.GetId()) } func (w SchedtagResourceW) ResourceType() string { return getSchedtagResourceType(w.candidater) } func getSchedtagResourceType(candidater ISchedtagCandidateResource) string { return candidater.GetSchedtagJointManager().GetMasterManager().KeywordPlural() } func (w SchedtagResourceW) GetSchedtags() []models.SSchedtag { return w.candidater.GetSchedtags() } func (w SchedtagResourceW) GetDynamicSchedDesc() *jsonutils.JSONDict { ret := jsonutils.NewDict() resSchedDesc := w.candidater.GetDynamicConditionInput() inputSchedDesc := w.input.JSON(w.input) ret.Add(resSchedDesc, w.candidater.Keyword()) ret.Add(inputSchedDesc, w.input.Keyword()) return ret } func (p *BaseSchedtagPredicate) GetHypervisorDriver() models.IGuestDriver { return models.GetDriver(p.Hypervisor) } func (p *BaseSchedtagPredicate) check(input ISchedtagCustomer, candidate ISchedtagCandidateResource, u *core.Unit, c core.Candidater) (*PredicatedSchedtagResource, error) { allTags, err := GetAllSchedtags(getSchedtagResourceType(candidate)) if err != nil { return nil, err } tagPredicate := NewSchedtagPredicate(input.GetSchedtags(), allTags) shouldExec := 
u.ShouldExecuteSchedtagFilter(c.Getter().Id()) res := &PredicatedSchedtagResource{ ISchedtagCandidateResource: candidate, } if shouldExec && !input.IsSpecifyResource() { if err := tagPredicate.Check( SchedtagResourceW{ candidater: candidate, input: input, }, ); err != nil { return nil, err } res.PreferTags = tagPredicate.GetPreferTags() res.AvoidTags = tagPredicate.GetAvoidTags() } return res, nil } func (p *BaseSchedtagPredicate) checkResources(input ISchedtagCustomer, ress []ISchedtagCandidateResource, u *core.Unit, c core.Candidater) ([]*PredicatedSchedtagResource, error) { errs := make([]error, 0) ret := make([]*PredicatedSchedtagResource, 0) for _, res := range ress { ps, err := p.check(input, res, u, c) if err != nil { // append err, resource not suit input customer errs = append(errs, err) continue } ret = append(ret, ps) } if len(ret) == 0 { return nil, errors.NewAggregate(errs) } return ret, nil } func (p *BaseSchedtagPredicate) GetInputResourcesMap(candidateId string) SchedtagInputResourcesMap { ret, ok := p.CandidateInputResources.Load(candidateId) if !ok { ret = make(map[int][]*PredicatedSchedtagResource) p.CandidateInputResources.Store(candidateId, ret) } return ret.(map[int][]*PredicatedSchedtagResource) } func (p *BaseSchedtagPredicate) PreExecute(sp ISchedtagPredicateInstance, u *core.Unit, cs []core.Candidater) (bool, error) { input := sp.GetInputs(u) if len(input) == 0 { return false, nil } p.Hypervisor = u.GetHypervisor() // always do select step u.AppendSelectPlugin(sp) return true, nil } func (p *BaseSchedtagPredicate) Execute( sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, ) (bool, []core.PredicateFailureReason, error) { inputs := sp.GetInputs(u) resources := sp.GetResources(c) h := NewPredicateHelper(sp, u, c) inputRes := p.GetInputResourcesMap(c.IndexKey()) filterErrs := make([]core.PredicateFailureReason, 0) for idx, input := range inputs { fitResources := make([]ISchedtagCandidateResource, 0) errs := make([]core.PredicateFailureReason, 0) matchedRes := make([]ISchedtagCandidateResource, 0) for _, r := range resources { if sp.IsResourceMatchInput(input, r) { matchedRes = append(matchedRes, r) } } if len(matchedRes) == 0 { errs = append(errs, &FailReason{ Reason: fmt.Sprintf("Not found matched %s, candidate: %s, %s: %s", input.ResourceKeyword(), c.Getter().Name(), input.Keyword(), input.JSON(input).String()), Type: fmt.Sprintf("%s_match", input.ResourceKeyword()), }) } for _, res := range matchedRes { if err := sp.IsResourceFitInput(u, c, res, input); err == nil { fitResources = append(fitResources, res) } else { errs = append(errs, err) } } if len(fitResources) == 0 { h.ExcludeByErrors(errs) break } if len(errs) > 0 { filterErrs = append(filterErrs, errs...) } matchedResources, err := p.checkResources(input, fitResources, u, c) if err != nil { if len(filterErrs) > 0 { h.ExcludeByErrors(filterErrs) } errMsg := fmt.Sprintf("schedtag: %v", err.Error()) h.Exclude(errMsg) } inputRes[idx] = matchedResources } return h.GetResult() } func (p *BaseSchedtagPredicate) OnPriorityEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater) { resTags := []models.SSchedtag{} for _, res := range sp.GetResources(c) { resTags = append(resTags, res.GetSchedtags()...) } inputRes := p.GetInputResourcesMap(c.IndexKey()) avoidTags := inputRes.GetAvoidTags()
setScore := SetCandidateScoreBySchedtag setScore(u, c, preferCountMap, true) setScore(u, c, avoidCountMap, false) } func (p *BaseSchedtagPredicate) OnSelectEnd(sp ISchedtagPredicateInstance, u *core.Unit, c core.Candidater, count int64) { inputRes := p.GetInputResourcesMap(c.IndexKey()) output := u.GetAllocatedResource(c.IndexKey()) inputs := sp.GetInputs(u) for idx, res := range inputRes { selRes := p.selectResource(sp, c, inputs[idx], res) sortRes := newSortCandidateResource(sp, selRes) sort.Sort(sortRes) //log.Debugf("sort result: %s", sortRes.DebugString()) sp.AddSelectResult(idx, inputs[idx], sortRes.res, output) } } type sortCandidateResource struct { predicate ISchedtagPredicateInstance res []ISchedtagCandidateResource } func newSortCandidateResource(predicate ISchedtagPredicateInstance, res []ISchedtagCandidateResource) *sortCandidateResource { return &sortCandidateResource{ predicate: predicate, res: res, } } func (s *sortCandidateResource) Len() int { return len(s.res) } func (s *sortCandidateResource) DebugString() string { var debugStr string for _, i := range s.res { debugStr = fmt.Sprintf("%s %d", debugStr, s.predicate.GetCandidateResourceSortScore(i)) } return debugStr } // desc order func (s *sortCandidateResource) Less(i, j int) bool { res1, res2 := s.res[i], s.res[j] v1 := s.predicate.GetCandidateResourceSortScore(res1) v2 := s.predicate.GetCandidateResourceSortScore(res2) return v1 > v2 } func (s *sortCandidateResource) Swap(i, j int) { s.res[i], s.res[j] = s.res[j], s.res[i] } func (p *BaseSchedtagPredicate) selectResource( sp ISchedtagPredicateInstance, c core.Candidater, input ISchedtagCustomer, ress []*PredicatedSchedtagResource, ) []ISchedtagCandidateResource { preferRes := make([]ISchedtagCandidateResource, 0) noTagRes := make([]ISchedtagCandidateResource, 0) avoidRes := make([]ISchedtagCandidateResource, 0) for _, res := range ress { if res.isNoTag() { noTagRes = append(noTagRes, res.ISchedtagCandidateResource) } else if res.hasPreferTags() { preferRes = append(preferRes, res.ISchedtagCandidateResource) } else if res.hasAvoidTags() { avoidRes = append(avoidRes, res.ISchedtagCandidateResource) } } for _, ress := range [][]ISchedtagCandidateResource{ preferRes, noTagRes, avoidRes, } { if len(ress) == 0 { continue } if ret := sp.DoSelect(c, input, ress); ret != nil { return ret } } return nil }
preferTags := inputRes.GetPreferTags() avoidCountMap := GetSchedtagCount(avoidTags, resTags, api.AggregateStrategyAvoid) preferCountMap := GetSchedtagCount(preferTags, resTags, api.AggregateStrategyPrefer)
random_line_split
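One detail worth flagging in GetInputResourcesMap across these records: the Load-then-Store sequence on the embedded sync.Map is not atomic, so two goroutines filtering the same candidate concurrently could each create and store a fresh map. sync.Map's LoadOrStore performs the check-and-insert in a single step. A minimal standalone sketch (string slices stand in for the real *PredicatedSchedtagResource values, and the returned map itself would still need its own synchronization if mutated concurrently):

```go
package main

import (
	"fmt"
	"sync"
)

// getOrInit mirrors GetInputResourcesMap but uses LoadOrStore, so the
// check-and-insert cannot interleave with another goroutine's Store.
func getOrInit(m *sync.Map, key string) map[int][]string {
	v, _ := m.LoadOrStore(key, make(map[int][]string))
	return v.(map[int][]string)
}

func main() {
	var m sync.Map
	r := getOrInit(&m, "candidate-1")
	r[0] = append(r[0], "prefer:ssd")
	fmt.Println(getOrInit(&m, "candidate-1")) // map[0:[prefer:ssd]]
}
```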
adminEventTitle.js
import React, { useEffect, useState } from 'react' import { actionsStore } from '../../../redux/actions' import '../title/title.css' import './adminEventTitle.css' import $ from 'jquery' import keys from '../../../config/env/keys' // import logo from '../assets/logo.jpg' import arrow from '../../../assets/Polygon 24@2x.png' import ReactPlayer from 'react-player' import { Modal, Button } from 'react-bootstrap' import CreateEvent from '../../events/createEvent/createEvent' import { connect } from 'react-redux' import red from '../../../assets/red.png' import yellow from '../../../assets/yellow.png' import pink from '../../../assets/pink.png' import black from '../../../assets/black.png' import gray from '../../../assets/gray.png' import lightBlue from '../../../assets/lightBlue.png' import lightBlue2 from '../../../assets/lightBlue2.png' import orange from '../../../assets/orange.png' import pink2 from '../../../assets/pink2.png' import purple1 from '../../../assets/purple1.png' import purple2 from '../../../assets/purple2.png' import turquoise from '../../../assets/turquoise.png' import { subscribe } from '../../../redux/middlweare/crud' import AllEvents from '../../events/allEvents/allEvents' import FooterEventsGallery from '../../footer/footerEventsGallery'; import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator'; import uploadIcon from '../../../assets/upload.png'; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Subscribe from '../../subscribe/subscribe' function mapStateToProps(state) { // red #86F3FF document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor); document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment); // state.settings.settings.eventsButtonColor // document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor);
site: state.site, pagesettings: state.pageSettings.page, headersettings: state.editHeader.header, subscribesettings: state.editSubscription.subscribe, // (to split into several reducers) // text-align data from the server } } const mapDispatchToProps = (dispatch) => ({ changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); }, changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) }, changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) }, setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })), changeImage: (url) => { dispatch(actionsStore.setImage(url)) }, changeLogo: (url) => dispatch(actionsStore.setLogo(url)) // addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)), }) export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) { const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props; const img = { '#ad60ff': purple1, '#4f40d0': purple2, '#ff53f7': pink, '#ff62b2': pink2, '#fa5252': red, '#ff803f': orange, '#faee3a': yellow, '#424149': black, '#9f9cb5': gray, '#63f597': turquoise, '#54b9ff': lightBlue, '#51e7fb': lightBlue2 } // const display = true;// will be filled via redux props if the title should be displayed // const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });// the settings will be filled from the redux props.. const [showing, setShowing] = useState(false); const [uploadImg, setUploadImg] = useState(false); var myImg = new Image(); function setHeightAndWidth() { var size; myImg.src = headersettings.eventsPageImageOrVideo; console.log("@@" + myImg.width / myImg.height + "@@") size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12; size += "vw"; var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16; inputHeight += "vh"; console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height) console.log("@@" + size + "@@") if (size == "NaNvw") { size = "30vw" } document.documentElement.style.setProperty('--image-width', size); document.documentElement.style.setProperty('--input-height', inputHeight); } const changeImage = (e) => { props.setLoaderUploadShow(true, 'image'); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeImage(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } const changeLogoImage = (e) => { props.setLoaderUploadShow(true, "logo"); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? 
document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeLogo(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } useEffect(() => { if (headersettings) { setHeightAndWidth() setFontsize() } }, [headersettings]) function checkImg() { let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, ''); if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) { return true; } else { return false; } } function changeToHeaderComponent() { changeCurrentComponent('Edit Header') } function changeToPageSettingsComponent() { changeCurrentComponent('Page Settings') } function setUpload() { setUploadImg(!uploadImg) } function setFontsize() { var height, len = headersettings.eventsPageTitle.length; height = Math.ceil(len / 15) * 7; if (height < 25) { height += "vh"; console.log("-- ", height, " --"); document.documentElement.style.setProperty('--title-height', height); } let textLength = headersettings.eventsPageTitle.length let textSize = 5 const baseSize = 8 if (Math.ceil(len / 15) >= 2) { textSize = textSize - 1; if (Math.ceil(len / 15) >= 3) { textSize = textSize - 1 if (Math.ceil(len / 15) >= 4) { textSize = textSize - 1 } } } document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`); } return ( <> <div className="container-fluid adminEventTitle" > <div className="row adminTitleDiv" id='showHeader'> <img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img> <label htmlFor='filelogo' className="adminLogoLabel"> <img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img> <div className="adminLogoIconDiv" onClick={changeToHeaderComponent}> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadLogo' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </label> <input type="file" name="file" accept="image/*" id="filelogo" className="adminInputfileLogo" onChange={changeLogoImage} /> <div className="col-3 adminTitleAndDescription"> <textarea className="adminEventTitletitleH1" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeTitleText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageTitle} // rows="2" // size="14" maxLength="90" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageTitle} onFocus={(e) => e.target.select()} ></textarea> <textarea className="adminEventDescription" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeBodyText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageDescription} rows="5" cols="35" maxLength="140" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageDescription} onFocus={(e) => e.target.select()} ></textarea> <div className="subscribeDivfromAdminTitle"> <Subscribe /> </div> </div> <div className="wrapAdminImgOrVieo col-5 d-flex justify-content-center"> <label htmlFor='file' 
className="adminImgLabel"> <div className="adminImgOrVieo d-flex justify-content-center" align="center" onClick={changeToHeaderComponent}> {/* <img src={uploadIcon} height="100%" width="100%" class="adminUpload"></img> */} {checkImg() === true ? <img className="myImg" id="imageInTitle" src={headersettings.eventsPageImageOrVideo} heigt="100%" width="100%" ></img> : <ReactPlayer width='100%' height='45vh' className="video_or_picture" url={headersettings.eventsPageImageOrVideo} controls={true} /> } <div className="UIiconDivAdmin d-flex justify-content-center"> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadImgAdmin' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </div> </label> <input type="file" name="file" accept="image/*" id="file" className="adminInputfile" onChange={changeImage} onClick={changeToHeaderComponent} /> </div> </div> </div> <div className="container-fluid adminEvnetsUnderFilter"> <div className="row" > <AllEvents style={{ zIndex: 1 }} sentBy={"admin"}></AllEvents> </div> <div > <FooterEventsGallery /></div> </div> </> ) })
return {
random_line_split
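Both upload handlers in the record above POST a FormData body with a single "file" field and a raw JWT in the Authorization header to `${keys.API_FILE}/<user>/upload`. To keep all sketches in this document in one language, here is the same wire-level request expressed with Go's standard library; the URL, token, and file bytes are placeholders, not real endpoints:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

// upload POSTs one multipart "file" field with a bare token in the
// Authorization header, matching the shape of the $.ajax calls above.
func upload(url, token, filename string, content []byte) (*http.Response, error) {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", filename)
	if err != nil {
		return nil, err
	}
	if _, err := part.Write(content); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil { // finalizes the multipart boundary
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", token)
	req.Header.Set("Content-Type", w.FormDataContentType())
	return http.DefaultClient.Do(req)
}

func main() {
	resp, err := upload("http://localhost:3000/demo-user/upload", "dev-jwt", "logo.png", []byte("fake bytes"))
	fmt.Println(resp, err)
}
```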
adminEventTitle.js
import React, { useEffect, useState } from 'react' import { actionsStore } from '../../../redux/actions' import '../title/title.css' import './adminEventTitle.css' import $ from 'jquery' import keys from '../../../config/env/keys' // import logo from '../assets/logo.jpg' import arrow from '../../../assets/Polygon 24@2x.png' import ReactPlayer from 'react-player' import { Modal, Button } from 'react-bootstrap' import CreateEvent from '../../events/createEvent/createEvent' import { connect } from 'react-redux' import red from '../../../assets/red.png' import yellow from '../../../assets/yellow.png' import pink from '../../../assets/pink.png' import black from '../../../assets/black.png' import gray from '../../../assets/gray.png' import lightBlue from '../../../assets/lightBlue.png' import lightBlue2 from '../../../assets/lightBlue2.png' import orange from '../../../assets/orange.png' import pink2 from '../../../assets/pink2.png' import purple1 from '../../../assets/purple1.png' import purple2 from '../../../assets/purple2.png' import turquoise from '../../../assets/turquoise.png' import { subscribe } from '../../../redux/middlweare/crud' import AllEvents from '../../events/allEvents/allEvents' import FooterEventsGallery from '../../footer/footerEventsGallery'; import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator'; import uploadIcon from '../../../assets/upload.png'; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Subscribe from '../../subscribe/subscribe' function mapStateToProps(state) { // red #86F3FF document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor); document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment); // state.settings.settings.eventsButtonColor // document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor); return { site: state.site, pagesettings: state.pageSettings.page, headersettings: state.editHeader.header, subscribesettings: state.editSubscription.subscribe, // (to split into several reducers) // text-align data from the server 
} } const mapDispatchToProps = (dispatch) => ({ changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); }, changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) }, changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) }, setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })), changeImage: (url) => { dispatch(actionsStore.setImage(url)) }, changeLogo: (url) => dispatch(actionsStore.setLogo(url)) // addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)), }) export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) { const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props; const img = { '#ad60ff': purple1, '#4f40d0': purple2, '#ff53f7': pink, '#ff62b2': pink2, '#fa5252': red, '#ff803f': orange, '#faee3a': yellow, '#424149': black, '#9f9cb5': gray, '#63f597': turquoise, '#54b9ff': lightBlue, '#51e7fb': lightBlue2 } // const display = true;// will be filled via redux props if the title should be displayed // const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' });// the settings will be filled from the redux props.. const [showing, setShowing] = useState(false); const [uploadImg, setUploadImg] = useState(false); var myImg = new Image(); function setHeightAndWidth() { var size; myImg.src = headersettings.eventsPageImageOrVideo; console.log("@@" + myImg.width / myImg.height + "@@") size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12; size += "vw"; var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16; inputHeight += "vh"; console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height) console.log("@@" + size + "@@") if (size == "NaNvw") { size = "30vw" } document.documentElement.style.setProperty('--image-width', size); document.documentElement.style.setProperty('--input-height', inputHeight); } const changeImage = (e) => { props.setLoaderUploadShow(true, 'image'); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeImage(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } const changeLogoImage = (e) => { props.setLoaderUploadShow(true, "logo"); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? 
document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeLogo(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } useEffect(() => { if (headersettings) { setHeightAndWidth() setFontsize() } }, [headersettings]) function checkImg() { let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, ''); if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) { return true; } else { return false; } } function changeToHeaderComponent() { changeCurrentComponent('Edit Header') } function changeToPageSettingsComponent() { changeCurrentComponent('Page Settings') } function setUpload() { setUploadImg(!up
gger var height, len = headersettings.eventsPageTitle.length; height = Math.ceil(len / 15) * 7; if (height < 25) { height += "vh"; console.log("-- ", height, " --"); document.documentElement.style.setProperty('--title-height', height); } let textSize = 5 if (Math.ceil(len / 15) >= 2) { textSize = textSize - 1; if (Math.ceil(len / 15) >= 3) { textSize = textSize - 1 if (Math.ceil(len / 15) >= 4) { textSize = textSize - 1 } } } document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`); } return ( <> <div className="container-fluid adminEventTitle" > <div className="row adminTitleDiv" id='showHeader'> <img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img> <label htmlFor='filelogo' className="adminLogoLabel"> <img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img> <div className="adminLogoIconDiv" onClick={changeToHeaderComponent}> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadLogo' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </label> <input type="file" name="file" accept="image/*" id="filelogo" className="adminInputfileLogo" onChange={changeLogoImage} /> <div className="col-3 adminTitleAndDescription"> <textarea className="adminEventTitletitleH1" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeTitleText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageTitle} // rows="2" // size="14" maxLength="90" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageTitle} onFocus={(e) => e.target.select()} ></textarea> <textarea className="adminEventDescription" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeBodyText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageDescription} rows="5" cols="35" maxLength="140" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageDescription} onFocus={(e) => e.target.select()} ></textarea> <div className="subscribeDivfromAdminTitle"> <Subscribe /> </div> </div> <div className="wrapAdminImgOrVieo col-5 d-flex justify-content-center"> <label htmlFor='file' className="adminImgLabel"> <div className="adminImgOrVieo d-flex justify-content-center" align="center" onClick={changeToHeaderComponent}> {/* <img src={uploadIcon} height="100%" width="100%" class="adminUpload"></img> */} {checkImg() === true ?
<img className="myImg" id="imageInTitle" src={headersettings.eventsPageImageOrVideo} height="100%" width="100%" ></img> : <ReactPlayer width='100%' height='45vh' className="video_or_picture" url={headersettings.eventsPageImageOrVideo} controls={true} /> } <div className="UIiconDivAdmin d-flex justify-content-center"> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadImgAdmin' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </div> </label> <input type="file" name="file" accept="image/*" id="file" className="adminInputfile" onChange={changeImage} onClick={changeToHeaderComponent} /> </div> </div> </div> <div className="container-fluid adminEvnetsUnderFilter"> <div className="row" > <AllEvents style={{ zIndex: 1 }} sentBy={"admin"}></AllEvents> </div> <div > <FooterEventsGallery /></div> </div> </> ) })
loadImg) } function setFontsize() { debu
identifier_body
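
The record above masks part of `setFontsize`, which shrinks the admin title font as the title grows, assuming the text wraps at roughly 15 characters per line and dropping one step per extra line down to an implicit floor. A minimal Go sketch of that sizing rule; the function name `titleFontSizeVW` and the float return are my own framing, not from the source:

package main

import (
	"fmt"
	"math"
)

// titleFontSizeVW mirrors the rule in setFontsize: assume the title wraps at
// ~15 characters per line, start at 5vw, and shrink 1vw per extra line, with
// the cascade stopping after four lines (so the size never drops below 2vw).
func titleFontSizeVW(title string) float64 {
	lines := int(math.Ceil(float64(len(title)) / 15.0))
	size := 5.0
	for l := 2; l <= lines && l <= 4; l++ {
		size-- // one step smaller for each extra wrapped line
	}
	return size
}

func main() {
	fmt.Println(titleFontSizeVW("welcome"))                    // 1 line  -> 5
	fmt.Println(titleFontSizeVW("welcome to leader event !!")) // 2 lines -> 4
}
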
adminEventTitle.js
import React, { useEffect, useState } from 'react' import { actionsStore } from '../../../redux/actions' import '../title/title.css' import './adminEventTitle.css' import $ from 'jquery' import keys from '../../../config/env/keys' // import logo from '../assets/logo.jpg' import arrow from '../../../assets/Polygon 24@2x.png' import ReactPlayer from 'react-player' import { Modal, Button } from 'react-bootstrap' import CreateEvent from '../../events/createEvent/createEvent' import { connect } from 'react-redux' import red from '../../../assets/red.png' import yellow from '../../../assets/yellow.png' import pink from '../../../assets/pink.png' import black from '../../../assets/black.png' import gray from '../../../assets/gray.png' import lightBlue from '../../../assets/lightBlue.png' import lightBlue2 from '../../../assets/lightBlue2.png' import orange from '../../../assets/orange.png' import pink2 from '../../../assets/pink2.png' import purple1 from '../../../assets/purple1.png' import purple2 from '../../../assets/purple2.png' import turquoise from '../../../assets/turquoise.png' import { subscribe } from '../../../redux/middlweare/crud' import AllEvents from '../../events/allEvents/allEvents' import FooterEventsGallery from '../../footer/footerEventsGallery'; import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator'; import uploadIcon from '../../../assets/upload.png'; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Subscribe from '../../subscribe/subscribe' function mapStateToProps(state) { // red #86F3FF document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor); document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment); // state.settings.settings.eventsButtonColor // document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor); return { site: state.site, pagesettings: state.pageSettings.page, headersettings: state.editHeader.header, subscribesettings: state.editSubscription.subscribe, // (to be split into several reducers) // text-align data comes from the server
} } const mapDispatchToProps = (dispatch) => ({ changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); }, changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) }, changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) }, setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })), changeImage: (url) => { dispatch(actionsStore.setImage(url)) }, changeLogo: (url) => dispatch(actionsStore.setLogo(url)) // addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)), }) export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) { const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props; const img = { '#ad60ff': purple1, '#4f40d0': purple2, '#ff53f7': pink, '#ff62b2': pink2, '#fa5252': red, '#ff803f': orange, '#faee3a': yellow, '#424149': black, '#9f9cb5': gray, '#63f597': turquoise, '#54b9ff': lightBlue, '#51e7fb': lightBlue2 } // const display = true; // filled from redux props when the title should be shown // const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' }); // settings are filled from the redux props const [showing, setShowing] = useState(false); const [uploadImg, setUploadImg] = useState(false); var myImg = new Image(); function setHeightAndWidth() { var size; myImg.src = headersettings.eventsPageImageOrVideo; console.log("@@" + myImg.width / myImg.height + "@@") size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12; size += "vw"; var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16; inputHeight += "vh"; console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height) console.log("@@" + size + "@@") if (size == "NaNvw") { size = "30vw" } document.documentElement.style.setProperty('--image-width', size); document.documentElement.style.setProperty('--input-height', inputHeight); } const changeImage = (e) => { props.setLoaderUploadShow(true, 'image'); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeImage(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } const changeLogoImage = (e) => { props.setLoaderUploadShow(true, "logo"); const TokenToString = document.cookie && document.cookie.includes('devJwt') ?
document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeLogo(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } useEffect(() => { if (headersettings) { setHeightAndWidth() setFontsize() } }, [headersettings]) function checkImg() { let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, ''); if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) { return true; } else { return false; } } function changeToHeaderComponent() { changeCurrentComponent('Edit Header') } function changeToPageSettingsComponent() { changeCurrentComponent('Page Settings') } function setUpload() { setUploadImg(!uploadImg) } function setFontsize() { var height, len = headersettings.eventsPageTitle.length; height = Math.ceil(len / 15) * 7; if (height < 25) { height += "vh"; console.log("-- ", height, " --"); document.documentElement.style.setProperty('--title-height', height); } let textSize = 5 if (Math.ceil(len / 15) >= 2) { textSize = textSize - 1; if (Math.ceil(len / 15) >= 3) { textSize = textSize - 1 if (Math.ceil(len / 15) >= 4) {
} return ( <> <div className="container-fluid adminEventTitle" > <div className="row adminTitleDiv" id='showHeader'> <img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img> <label htmlFor='filelogo' className="adminLogoLabel"> <img className="adminMylogo" src={headersettings.eventsPageLogo} onClick={changeToHeaderComponent}></img> <div className="adminLogoIconDiv" onClick={changeToHeaderComponent}> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadLogo' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </label> <input type="file" name="file" accept="image/*" id="filelogo" className="adminInputfileLogo" onChange={changeLogoImage} /> <div className="col-3 adminTitleAndDescription"> <textarea className="adminEventTitletitleH1" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeTitleText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageTitle} // rows="2" // size="14" maxLength="90" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageTitle} onFocus={(e) => e.target.select()} ></textarea> <textarea className="adminEventDescription" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeBodyText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageDescription} rows="5" cols="35" maxLength="140" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageDescription} onFocus={(e) => e.target.select()} ></textarea> <div className="subscribeDivfromAdminTitle"> <Subscribe /> </div> </div> <div className="wrapAdminImgOrVieo col-5 d-flex justify-content-center"> <label htmlFor='file' className="adminImgLabel"> <div className="adminImgOrVieo d-flex justify-content-center" align="center" onClick={changeToHeaderComponent}> {/* <img src={uploadIcon} height="100%" width="100%" class="adminUpload"></img> */} {checkImg() === true ? <img className="myImg" id="imageInTitle" src={headersettings.eventsPageImageOrVideo} height="100%" width="100%" ></img> : <ReactPlayer width='100%' height='45vh' className="video_or_picture" url={headersettings.eventsPageImageOrVideo} controls={true} /> } <div className="UIiconDivAdmin d-flex justify-content-center"> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadImgAdmin' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </div> </label> <input type="file" name="file" accept="image/*" id="file" className="adminInputfile" onChange={changeImage} onClick={changeToHeaderComponent} /> </div> </div> </div> <div className="container-fluid adminEvnetsUnderFilter"> <div className="row" > <AllEvents style={{ zIndex: 1 }} sentBy={"admin"}></AllEvents> </div> <div > <FooterEventsGallery /></div> </div> </> ) })
textSize = textSize - 1 } } } document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`);
conditional_block
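
Besides the masked conditional above, the surrounding `setFontsize` also sets a `--title-height` CSS variable: 7vh per wrapped line of ~15 characters, applied only while the result stays under 25vh. A small Go sketch of that rule, under names of my own choosing:

package main

import (
	"fmt"
	"math"
)

// titleHeightVH mirrors the height rule in setFontsize: 7vh per wrapped line
// of ~15 characters. The CSS variable is only updated while the result stays
// under 25vh, so the second return value reports whether to apply it.
func titleHeightVH(title string) (float64, bool) {
	h := math.Ceil(float64(len(title))/15.0) * 7
	return h, h < 25
}

func main() {
	if h, ok := titleHeightVH("welcome to leader event"); ok {
		fmt.Printf("--title-height: %.0fvh\n", h) // 14vh for a two-line title
	}
}
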
adminEventTitle.js
import React, { useEffect, useState } from 'react' import { actionsStore } from '../../../redux/actions' import '../title/title.css' import './adminEventTitle.css' import $ from 'jquery' import keys from '../../../config/env/keys' // import logo from '../assets/logo.jpg' import arrow from '../../../assets/Polygon 24@2x.png' import ReactPlayer from 'react-player' import { Modal, Button } from 'react-bootstrap' import CreateEvent from '../../events/createEvent/createEvent' import { connect } from 'react-redux' import red from '../../../assets/red.png' import yellow from '../../../assets/yellow.png' import pink from '../../../assets/pink.png' import black from '../../../assets/black.png' import gray from '../../../assets/gray.png' import lightBlue from '../../../assets/lightBlue.png' import lightBlue2 from '../../../assets/lightBlue2.png' import orange from '../../../assets/orange.png' import pink2 from '../../../assets/pink2.png' import purple1 from '../../../assets/purple1.png' import purple2 from '../../../assets/purple2.png' import turquoise from '../../../assets/turquoise.png' import { subscribe } from '../../../redux/middlweare/crud' import AllEvents from '../../events/allEvents/allEvents' import FooterEventsGallery from '../../footer/footerEventsGallery'; import UploadImageFromConfigurator from '../../Configurator/uploadImageFromConfigurator'; import uploadIcon from '../../../assets/upload.png'; import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; import Subscribe from '../../subscribe/subscribe' function mapStateToProps(state) { // red #86F3FF document.documentElement.style.setProperty('--Button-color', state.pageSettings.page.eventsButtonColor); document.documentElement.style.setProperty('--align-text', state.editHeader.header.eventsPageAlignment); // state.settings.settings.eventsButtonColor // document.documentElement.style.setProperty('--Page-color',state.settings.eventsPageColor); return { site: state.site, pagesettings: state.pageSettings.page, headersettings: state.editHeader.header, subscribesettings: state.editSubscription.subscribe, // (to be split into several reducers) // text-align data comes from the server
} } const mapDispatchToProps = (dispatch) => ({ changeTitleText: (e) => { dispatch(actionsStore.setTitleText(e)); }, changeBodyText: (e) => { dispatch(actionsStore.setBodyText(e)) }, changeCurrentComponent: (e) => { dispatch(actionsStore.setCurrentComponent(e)) }, setLoaderUploadShow: (bool, imageOrLogo) => dispatch(actionsStore.setLoaderUploadShow({ bool: bool, imageOrLogo: imageOrLogo })), changeImage: (url) => { dispatch(actionsStore.setImage(url)) }, changeLogo: (url) => dispatch(actionsStore.setLogo(url)) // addAllEvents: (events) => dispatch(actionsStore.addAllEvents(events)), }) export default connect(mapStateToProps, mapDispatchToProps)(function AdminEventTitle(props) { const { pagesettings, headersettings, subscribesettings, changeTitleText, changeBodyText, changeCurrentComponent } = props; const img = { '#ad60ff': purple1, '#4f40d0': purple2, '#ff53f7': pink, '#ff62b2': pink2, '#fa5252': red, '#ff803f': orange, '#faee3a': yellow, '#424149': black, '#9f9cb5': gray, '#63f597': turquoise, '#54b9ff': lightBlue, '#51e7fb': lightBlue2 } // const display = true; // filled from redux props when the title should be shown // const [settings, setSettings] = useState({ eventsPageTitle: 'welcome to leader event', picteventsPageImageure: '', eventsPageDescription: 'Don’t Act So Surprised, Your Highness. You Weren’t On Any Mercy Mission This Time. Seve…', amountEventsInRow: '3' }); // settings are filled from the redux props const [showing, setShowing] = useState(false); const [uploadImg, setUploadImg] = useState(false); var myImg = new Image(); function setHeightAndWidth() { var size; myImg.src = headersettings.eventsPageImageOrVideo;
+ myImg.width / myImg.height + "@@") size = myImg.width / myImg.height < 1.5 ? myImg.width / myImg.height * 21 : myImg.width / myImg.height < 2 ? myImg.width / myImg.height * 17 : myImg.width / myImg.height * 12; size += "vw"; var inputHeight = myImg.width / myImg.height < 1.5 ? 24 : myImg.width / myImg.height < 2 ? 20 : 16; inputHeight += "vh"; console.log("myImg.width ", myImg.width, " myImg.height ", myImg.height) console.log("@@" + size + "@@") if (size == "NaNvw") { size = "30vw" } document.documentElement.style.setProperty('--image-width', size); document.documentElement.style.setProperty('--input-height', inputHeight); } const changeImage = (e) => { props.setLoaderUploadShow(true, 'image'); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeImage(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } const changeLogoImage = (e) => { props.setLoaderUploadShow(true, "logo"); const TokenToString = document.cookie && document.cookie.includes('devJwt') ? document.cookie .split(';') .filter(s => s.includes('devJwt'))[0] .split('=') .pop() : null const userName = window.location.pathname.split('/')[1] const file = e.target.files[0]; var myFile = new FormData(); myFile.append("file", file); $.ajax({ type: "POST", url: `${keys.API_FILE}/${userName}/upload`, headers: { Authorization: TokenToString }, data: myFile, processData: false, contentType: false, success: (data) => { // alert("upload success"); props.changeLogo(data.data.url); }, error: function (err) { alert('please try again later'); }, }); } useEffect(() => { if (headersettings) { setHeightAndWidth() setFontsize() } }, [headersettings]) function checkImg() { let x = headersettings.eventsPageImageOrVideo.replace(/[{()}]/g, ''); if (x.match(/\w+\.(jpg|jpeg|gif|png|tiff|bmp)$/)) { return true; } else { return false; } } function changeToHeaderComponent() { changeCurrentComponent('Edit Header') } function changeToPageSettingsComponent() { changeCurrentComponent('Page Settings') } function setUpload() { setUploadImg(!uploadImg) } function setFontsize() { var height, len = headersettings.eventsPageTitle.length; height = Math.ceil(len / 15) * 7; if (height < 25) { height += "vh"; console.log("-- ", height, " --"); document.documentElement.style.setProperty('--title-height', height); } let textSize = 5 if (Math.ceil(len / 15) >= 2) { textSize = textSize - 1; if (Math.ceil(len / 15) >= 3) { textSize = textSize - 1 if (Math.ceil(len / 15) >= 4) { textSize = textSize - 1 } } } document.documentElement.style.setProperty('--font-size-title-admin', `${textSize}vw`); } return ( <> <div className="container-fluid adminEventTitle" > <div className="row adminTitleDiv" id='showHeader'> <img className="myImg titleImgColor" src={img[pagesettings.eventsPageColor]} onClick={changeToPageSettingsComponent}></img> <label htmlFor='filelogo' className="adminLogoLabel"> <img className="adminMylogo" src={headersettings.eventsPageLogo}
onClick={changeToHeaderComponent}></img> <div className="adminLogoIconDiv" onClick={changeToHeaderComponent}> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadLogo' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </label> <input type="file" name="file" accept="image/*" id="filelogo" className="adminInputfileLogo" onChange={changeLogoImage} /> <div className="col-3 adminTitleAndDescription"> <textarea className="adminEventTitletitleH1" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeTitleText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageTitle} // rows="2" // size="14" maxLength="90" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageTitle} onFocus={(e) => e.target.select()} ></textarea> <textarea className="adminEventDescription" // onKeyPress={(e) => e.key == 'Enter' && e.target.value.includes('\n') && e.preventDefault()} onChange={(e) => changeBodyText(e.target.value)} onClick={changeToHeaderComponent} value={headersettings.eventsPageDescription} rows="5" cols="35" maxLength="140" // style={{ textAlign: 'left' }} placeholder={headersettings.eventsPageDescription} onFocus={(e) => e.target.select()} ></textarea> <div className="subscribeDivfromAdminTitle"> <Subscribe /> </div> </div> <div className="wrapAdminImgOrVieo col-5 d-flex justify-content-center"> <label htmlFor='file' className="adminImgLabel"> <div className="adminImgOrVieo d-flex justify-content-center" align="center" onClick={changeToHeaderComponent}> {/* <img src={uploadIcon} height="100%" width="100%" class="adminUpload"></img> */} {checkImg() === true ? <img className="myImg" id="imageInTitle" src={headersettings.eventsPageImageOrVideo} height="100%" width="100%" ></img> : <ReactPlayer width='100%' height='45vh' className="video_or_picture" url={headersettings.eventsPageImageOrVideo} controls={true} /> } <div className="UIiconDivAdmin d-flex justify-content-center"> <FontAwesomeIcon id='angle-right' className='iconCloudUpload uploadImgAdmin' icon={['fas', 'cloud-upload-alt']} ></FontAwesomeIcon> </div> </div> </label> <input type="file" name="file" accept="image/*" id="file" className="adminInputfile" onChange={changeImage} onClick={changeToHeaderComponent} /> </div> </div> </div> <div className="container-fluid adminEvnetsUnderFilter"> <div className="row" > <AllEvents style={{ zIndex: 1 }} sentBy={"admin"}></AllEvents> </div> <div > <FooterEventsGallery /></div> </div> </> ) })
console.log("@@"
identifier_name
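
The `identifier_name` sample above masks a `console.log` call inside `setHeightAndWidth`, whose real work is bucketing the header image's aspect ratio into a vw width, with a 30vw fallback when the dimensions are not loaded yet (the "NaNvw" case). A Go sketch of that bucketing, under my own names:

package main

import (
	"fmt"
	"math"
)

// imageWidthVW mirrors setHeightAndWidth: the header image width (in vw)
// scales with its aspect ratio, with larger multipliers for squarer images;
// a NaN ratio (dimensions unknown, e.g. image not loaded) falls back to 30vw.
func imageWidthVW(w, h float64) string {
	ratio := w / h
	switch {
	case math.IsNaN(ratio): // 0/0: the image has no dimensions yet
		return "30vw"
	case ratio < 1.5:
		return fmt.Sprintf("%.1fvw", ratio*21)
	case ratio < 2:
		return fmt.Sprintf("%.1fvw", ratio*17)
	default:
		return fmt.Sprintf("%.1fvw", ratio*12)
	}
}

func main() {
	fmt.Println(imageWidthVW(1600, 900)) // 16:9 -> ~30.2vw
	fmt.Println(imageWidthVW(0, 0))      // 30vw fallback
}
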
index.go
package main import ( "fmt" "strconv" "strings" "time" ) import "os" import "bufio" import "math" const RADAR_DIST = 4 const MOVE_DIST = 4 const UNKNOWN_THRESHOLD = 0.40 const COOLDOwN_RADAR = 5 const COOLDOwN_TRAP = 5 /********************************************************************************** * Functions that the std library doesn't have *********************************************************************************/ func abs(n int) int { if n < 0 { return -n } return n } func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b } func clamp(x, low, high int) int { return min(max(x, low), high) } /********************************************************************************** * Data structures *********************************************************************************/ type World struct { width, height int } var world World func (w World) ArrayIndex(x, y int) int { return y * w.width + x } func (w World) ArrayIndexC(coord Coord) int { return coord.y * w.width + coord.x } func (w World) Center() Coord { return Coord{w.width / 2, w.height / 2} } func (w World) Size() int { return w.width * w.height } /** * A pair of ints for coordinates **/ type Coord struct { x, y int } func (c Coord) String() string { return fmt.Sprintf("(%d, %d)", c.x, c.y) } type Cmd int const ( CMD_WAIT Cmd = 0 CMD_MOVE Cmd = 1 CMD_DIG Cmd = 2 CMD_RADAR Cmd = 3 CMD_TRAP Cmd = 4 ) type Item int const ( ITEM_NONE Item = -1 ITEM_RADAR Item = 2 ITEM_TRAP Item = 3 ITEM_ORE Item = 4 ) type Object int const ( OBJ_ME Object = 0 OBJ_OPPONENT Object = 1 OBJ_RADAR Object = 2 OBJ_TRAP Object = 3 ) type Robot struct { id int pos Coord cmd Cmd targetPos Coord item Item digIntent Item } func (r Robot) String() string { return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item) } func (r *Robot) Wait() { r.cmd = CMD_WAIT } func (r *Robot) MoveTo(pos Coord) { r.cmd = CMD_MOVE r.targetPos.x = pos.x r.targetPos.y = pos.y } func (r *Robot) Move(dx, dy int) { r.cmd = CMD_MOVE r.targetPos.x = clamp(r.pos.x + dx, 0, world.width) r.targetPos.y = clamp(r.pos.y + dy, 0, world.height) } func (r *Robot) ReturnToHQ() { r.cmd = CMD_MOVE r.targetPos.x = 0 r.targetPos.y = r.pos.y } func (r Robot) IsAtHQ() bool { return r.pos.x == 0 } func (r *Robot) Dig(pos Coord, intent Item) { r.cmd = CMD_DIG r.targetPos.x = pos.x r.targetPos.y = pos.y r.digIntent = intent } func (r *Robot) RequestRadar() { r.cmd = CMD_RADAR } func (r *Robot) RequestTrap() { r.cmd = CMD_TRAP } func (r Robot) GetCommand() string { if r.cmd == CMD_WAIT { return "WAIT" } if r.cmd == CMD_MOVE { return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_DIG { return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_RADAR { return "REQUEST RADAR" } if r.cmd == CMD_TRAP { return "REQUEST TRAP" } fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id) return "WAIT" } func (r Robot) IsCmdValid(ores *[]int) (valid bool) { if r.cmd == CMD_DIG { if r.digIntent == ITEM_RADAR { return r.item == ITEM_RADAR } if r.digIntent == ITEM_TRAP { return r.item == ITEM_TRAP } if r.digIntent == ITEM_ORE { // Can only have 1 ore (for now?) 
if r.item == ITEM_ORE { return false } valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0 (*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay return } } if r.cmd == CMD_MOVE { return r.pos != r.targetPos } if r.cmd == CMD_RADAR { return r.item != ITEM_RADAR } if r.cmd == CMD_TRAP { return r.item != ITEM_TRAP } return false } func (r Robot) IsDead() bool { return r.pos.x == -1 } /********************************************************************************** * Utility functions *********************************************************************************/ /** * The Manhattan distance between 2 coordinates **/ func dist(p1, p2 Coord) int { return abs(p1.x-p2.x) + abs(p1.y-p2.y) } /** * The Manhattan distance between 2 coordinates for digging (1 less) **/ func digDist(p1, p2 Coord) int { return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0) } /** * The distance in turns between 2 coordinates **/ func turnDist(p1, p2 Coord) int { return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST)) } /** * The distance in turns between 2 coordinates for digging **/ func
(p1, p2 Coord) int { return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST)) } /********************************************************************************** * Serious business here *********************************************************************************/ func calculateCellRadarValues(unknowns []int) []int { radarValues := make([]int, world.Size()) for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { cell := Coord{i, j} for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ { for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ { if dist(cell, Coord{m, n}) > RADAR_DIST { continue } radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)] } } } } return radarValues } func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) { radarValues := calculateCellRadarValues(unknowns) closest := world.width // furthest point largestValue := 0 // lowest value for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { value := radarValues[world.ArrayIndex(i, j)] if value > largestValue { largestValue = value best = Coord{i, j} closest = i } else if value == largestValue { newCoord := Coord{i, j} // Pick the closest to HQ if i < closest { best = newCoord closest = i } } } } return best } /********************************************************************************** * Parsing Logic *********************************************************************************/ func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) { scanner.Scan() fmt.Sscan(scanner.Text(), &myScore, &opponentScore) return myScore, opponentScore } func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){ numOres = 0 numUnknowns = 0 for j := 0; j < world.height; j++ { scanner.Scan() inputs := strings.Split(scanner.Text(), " ") for i := 0; i < world.width; i++ { // ore: amount of ore or "?" 
if unknown // hole: 1 if cell has a hole ore, err := strconv.Atoi(inputs[2*i]) if err != nil { (*ores)[world.ArrayIndex(i,j)] = 0 (*unknowns)[world.ArrayIndex(i,j)] = 1 numUnknowns++ } else { (*ores)[world.ArrayIndex(i,j)] = ore (*unknowns)[world.ArrayIndex(i,j)] = 0 numOres += ore } hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32) _ = hole } } return numOres, numUnknowns } func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){ // entityCount: number of entities visible to you // radarCooldown: turns left until a new radar can be requested // trapCooldown: turns left until a new trap can be requested var entityCount int scanner.Scan() fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown) myRobot_i := 0 for i := 0; i < entityCount; i++ { // id: unique id of the entity // type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap // y: position of the entity // item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE) var id, objType, x, y, item int scanner.Scan() fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item) if Object(objType) == OBJ_ME { robot := &(*robots)[myRobot_i] robot.id = id robot.pos.x = x robot.pos.y = y robot.item = Item(item) myRobot_i++ } else if Object(objType) == OBJ_TRAP { (*ores)[world.ArrayIndex(x,y)] = 0 } } return radarCooldown, trapCooldown } /********************************************************************************** * Main loop *********************************************************************************/ func main() { scanner := bufio.NewScanner(os.Stdin) scanner.Buffer(make([]byte, 1000000), 1000000) // height: size of the map var width, height int scanner.Scan() fmt.Sscan(scanner.Text(), &width, &height) world = World{width, height} ores := make([]int, width*height) unknowns := make([]int, width*height) robots := make([]Robot, 5) for { // Keep timing of each turn start := time.Now() // Parse input myScore, opponentScore := ParseScore(scanner) numOre, numUnknowns := ParseWorld(scanner, &ores, &unknowns) radarCooldown, trapCooldown := ParseEntities(scanner, &robots, &ores) _, _, _, _ = myScore, opponentScore, radarCooldown, trapCooldown percentUnknown := float64(numUnknowns) / float64(world.Size()) unknownThresholdPassed := percentUnknown < UNKNOWN_THRESHOLD var needCmds []int for i := 0; i < len(robots); i++ { robot := &robots[i] if robot.IsCmdValid(&ores) { continue } if robot.item == ITEM_ORE { robot.ReturnToHQ() } else if robot.IsAtHQ() { if robot.item == ITEM_RADAR { robot.Dig(calculateBestRadarPosition(unknowns, robot.pos), ITEM_RADAR) } else if robot.item == ITEM_TRAP { // nothing right now } else if radarCooldown == 0 && !unknownThresholdPassed { robot.RequestRadar() radarCooldown = COOLDOwN_RADAR //} else if trapCooldown == 0 { // calculate spot to place trap //trapCooldown = COOLDOwN_TRAP } else { needCmds = append(needCmds, i) } } else { needCmds = append(needCmds, i) } } if len(needCmds) > 0 { cmdIndex := 0 if numOre > 0 { for i := 0; i < width && numOre > 0 && cmdIndex < len(needCmds); i++ { for j := 0; j < height && numOre > 0 && cmdIndex < len(needCmds); j++ { cellOres := ores[world.ArrayIndex(i, j)] if cellOres != 0 { for k := 0; k < cellOres && cmdIndex < len(needCmds); k++ { robots[needCmds[cmdIndex]].Dig(Coord{i, j}, ITEM_ORE) cmdIndex++ numOre-- } } } } } else { robots[needCmds[cmdIndex]].ReturnToHQ() cmdIndex++ } for ; cmdIndex < len(needCmds); cmdIndex++ { 
robots[needCmds[cmdIndex]].MoveTo(world.Center()) } } for i := 0; i < len(robots); i++ { fmt.Println(robots[i].GetCommand()) // WAIT|MOVE x y|DIG x y|REQUEST item } elapsed := time.Since(start) fmt.Fprintf(os.Stderr, "%v elapsed in turn\n", elapsed) } }
digTurnDist
identifier_name
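
The masked identifier in the Go record above is `digTurnDist`. The helper family around it converts Manhattan distances into turn counts, dividing by MOVE_DIST = 4 and giving digs one free cell of reach. A quick standalone check of that arithmetic:

package main

import (
	"fmt"
	"math"
)

// With MOVE_DIST = 4, a robot covers at most 4 Manhattan steps per turn, and
// a DIG reaches one cell beyond the robot, so digTurnDist divides distance-1.
func main() {
	const moveDist = 4.0
	dist := 9.0 // Manhattan distance, e.g. (0,0) -> (5,4)
	fmt.Println(math.Ceil(dist / moveDist))         // turnDist: 3 turns to arrive
	fmt.Println(math.Ceil((dist - 1) / moveDist))   // digTurnDist: 2 turns to dig
}
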
index.go
package main import ( "fmt" "strconv" "strings" "time" ) import "os" import "bufio" import "math" const RADAR_DIST = 4 const MOVE_DIST = 4 const UNKNOWN_THRESHOLD = 0.40 const COOLDOwN_RADAR = 5 const COOLDOwN_TRAP = 5 /********************************************************************************** * Functions that the std library doesn't have *********************************************************************************/ func abs(n int) int
func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b } func clamp(x, low, high int) int { return min(max(x, low), high) } /********************************************************************************** * Data structures *********************************************************************************/ type World struct { width, height int } var world World func (w World) ArrayIndex(x, y int) int { return y * w.width + x } func (w World) ArrayIndexC(coord Coord) int { return coord.y * w.width + coord.x } func (w World) Center() Coord { return Coord{w.width / 2, w.height / 2} } func (w World) Size() int { return w.width * w.height } /** * A pair of ints for coordinates **/ type Coord struct { x, y int } func (c Coord) String() string { return fmt.Sprintf("(%d, %d)", c.x, c.y) } type Cmd int const ( CMD_WAIT Cmd = 0 CMD_MOVE Cmd = 1 CMD_DIG Cmd = 2 CMD_RADAR Cmd = 3 CMD_TRAP Cmd = 4 ) type Item int const ( ITEM_NONE Item = -1 ITEM_RADAR Item = 2 ITEM_TRAP Item = 3 ITEM_ORE Item = 4 ) type Object int const ( OBJ_ME Object = 0 OBJ_OPPONENT Object = 1 OBJ_RADAR Object = 2 OBJ_TRAP Object = 3 ) type Robot struct { id int pos Coord cmd Cmd targetPos Coord item Item digIntent Item } func (r Robot) String() string { return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item) } func (r *Robot) Wait() { r.cmd = CMD_WAIT } func (r *Robot) MoveTo(pos Coord) { r.cmd = CMD_MOVE r.targetPos.x = pos.x r.targetPos.y = pos.y } func (r *Robot) Move(dx, dy int) { r.cmd = CMD_MOVE r.targetPos.x = clamp(r.pos.x + dx, 0, world.width) r.targetPos.y = clamp(r.pos.y + dy, 0, world.height) } func (r *Robot) ReturnToHQ() { r.cmd = CMD_MOVE r.targetPos.x = 0 r.targetPos.y = r.pos.y } func (r Robot) IsAtHQ() bool { return r.pos.x == 0 } func (r *Robot) Dig(pos Coord, intent Item) { r.cmd = CMD_DIG r.targetPos.x = pos.x r.targetPos.y = pos.y r.digIntent = intent } func (r *Robot) RequestRadar() { r.cmd = CMD_RADAR } func (r *Robot) RequestTrap() { r.cmd = CMD_TRAP } func (r Robot) GetCommand() string { if r.cmd == CMD_WAIT { return "WAIT" } if r.cmd == CMD_MOVE { return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_DIG { return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_RADAR { return "REQUEST RADAR" } if r.cmd == CMD_TRAP { return "REQUEST TRAP" } fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id) return "WAIT" } func (r Robot) IsCmdValid(ores *[]int) (valid bool) { if r.cmd == CMD_DIG { if r.digIntent == ITEM_RADAR { return r.item == ITEM_RADAR } if r.digIntent == ITEM_TRAP { return r.item == ITEM_TRAP } if r.digIntent == ITEM_ORE { // Can only have 1 ore (for now?) 
if r.item == ITEM_ORE { return false } valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0 (*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay return } } if r.cmd == CMD_MOVE { return r.pos != r.targetPos } if r.cmd == CMD_RADAR { return r.item != ITEM_RADAR } if r.cmd == CMD_TRAP { return r.item != ITEM_TRAP } return false } func (r Robot) IsDead() bool { return r.pos.x == -1 } /********************************************************************************** * Utility functions *********************************************************************************/ /** * The Manhattan distance between 2 coordinates **/ func dist(p1, p2 Coord) int { return abs(p1.x-p2.x) + abs(p1.y-p2.y) } /** * The Manhattan distance between 2 coordinates for digging (1 less) **/ func digDist(p1, p2 Coord) int { return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0) } /** * The distance in turns between 2 coordinates **/ func turnDist(p1, p2 Coord) int { return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST)) } /** * The distance in turns between 2 coordinates for digging **/ func digTurnDist(p1, p2 Coord) int { return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST)) } /********************************************************************************** * Serious business here *********************************************************************************/ func calculateCellRadarValues(unknowns []int) []int { radarValues := make([]int, world.Size()) for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { cell := Coord{i, j} for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ { for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ { if dist(cell, Coord{m, n}) > RADAR_DIST { continue } radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)] } } } } return radarValues } func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) { radarValues := calculateCellRadarValues(unknowns) closest := world.width // furthest point largestValue := 0 // lowest value for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { value := radarValues[world.ArrayIndex(i, j)] if value > largestValue { largestValue = value best = Coord{i, j} closest = i } else if value == largestValue { newCoord := Coord{i, j} // Pick the closest to HQ if i < closest { best = newCoord closest = i } } } } return best } /********************************************************************************** * Parsing Logic *********************************************************************************/ func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) { scanner.Scan() fmt.Sscan(scanner.Text(), &myScore, &opponentScore) return myScore, opponentScore } func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){ numOres = 0 numUnknowns = 0 for j := 0; j < world.height; j++ { scanner.Scan() inputs := strings.Split(scanner.Text(), " ") for i := 0; i < world.width; i++ { // ore: amount of ore or "?" 
if unknown // hole: 1 if cell has a hole ore, err := strconv.Atoi(inputs[2*i]) if err != nil { (*ores)[world.ArrayIndex(i,j)] = 0 (*unknowns)[world.ArrayIndex(i,j)] = 1 numUnknowns++ } else { (*ores)[world.ArrayIndex(i,j)] = ore (*unknowns)[world.ArrayIndex(i,j)] = 0 numOres += ore } hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32) _ = hole } } return numOres, numUnknowns } func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){ // entityCount: number of entities visible to you // radarCooldown: turns left until a new radar can be requested // trapCooldown: turns left until a new trap can be requested var entityCount int scanner.Scan() fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown) myRobot_i := 0 for i := 0; i < entityCount; i++ { // id: unique id of the entity // type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap // y: position of the entity // item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE) var id, objType, x, y, item int scanner.Scan() fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item) if Object(objType) == OBJ_ME { robot := &(*robots)[myRobot_i] robot.id = id robot.pos.x = x robot.pos.y = y robot.item = Item(item) myRobot_i++ } else if Object(objType) == OBJ_TRAP { (*ores)[world.ArrayIndex(x,y)] = 0 } } return radarCooldown, trapCooldown } /********************************************************************************** * Main loop *********************************************************************************/ func main() { scanner := bufio.NewScanner(os.Stdin) scanner.Buffer(make([]byte, 1000000), 1000000) // height: size of the map var width, height int scanner.Scan() fmt.Sscan(scanner.Text(), &width, &height) world = World{width, height} ores := make([]int, width*height) unknowns := make([]int, width*height) robots := make([]Robot, 5) for { // Keep timing of each turn start := time.Now() // Parse input myScore, opponentScore := ParseScore(scanner) numOre, numUnknowns := ParseWorld(scanner, &ores, &unknowns) radarCooldown, trapCooldown := ParseEntities(scanner, &robots, &ores) _, _, _, _ = myScore, opponentScore, radarCooldown, trapCooldown percentUnknown := float64(numUnknowns) / float64(world.Size()) unknownThresholdPassed := percentUnknown < UNKNOWN_THRESHOLD var needCmds []int for i := 0; i < len(robots); i++ { robot := &robots[i] if robot.IsCmdValid(&ores) { continue } if robot.item == ITEM_ORE { robot.ReturnToHQ() } else if robot.IsAtHQ() { if robot.item == ITEM_RADAR { robot.Dig(calculateBestRadarPosition(unknowns, robot.pos), ITEM_RADAR) } else if robot.item == ITEM_TRAP { // nothing right now } else if radarCooldown == 0 && !unknownThresholdPassed { robot.RequestRadar() radarCooldown = COOLDOwN_RADAR //} else if trapCooldown == 0 { // calculate spot to place trap //trapCooldown = COOLDOwN_TRAP } else { needCmds = append(needCmds, i) } } else { needCmds = append(needCmds, i) } } if len(needCmds) > 0 { cmdIndex := 0 if numOre > 0 { for i := 0; i < width && numOre > 0 && cmdIndex < len(needCmds); i++ { for j := 0; j < height && numOre > 0 && cmdIndex < len(needCmds); j++ { cellOres := ores[world.ArrayIndex(i, j)] if cellOres != 0 { for k := 0; k < cellOres && cmdIndex < len(needCmds); k++ { robots[needCmds[cmdIndex]].Dig(Coord{i, j}, ITEM_ORE) cmdIndex++ numOre-- } } } } } else { robots[needCmds[cmdIndex]].ReturnToHQ() cmdIndex++ } for ; cmdIndex < len(needCmds); cmdIndex++ { 
robots[needCmds[cmdIndex]].MoveTo(world.Center()) } } for i := 0; i < len(robots); i++ { fmt.Println(robots[i].GetCommand()) // WAIT|MOVE x y|DIG x y|REQUEST item } elapsed := time.Since(start) fmt.Fprintf(os.Stderr, "%v elapsed in turn\n", elapsed) } }
{ if n < 0 { return -n } return n }
identifier_body
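
The `identifier_body` sample above masks the body of `abs`, but the more interesting logic nearby is `IsCmdValid`, which decrements the shared ore counter for a robot's target cell as a cheap reservation, so two robots do not keep digging the same exhausted vein. A minimal Go model of that reservation trick, simplified to a single cell:

package main

import "fmt"

// Each robot that keeps its DIG command decrements the shared ore count for
// its target cell; later robots see the depleted count and re-plan. As in
// IsCmdValid, the counter is allowed to go negative - only the > 0 check matters.
func main() {
	ores := []int{2} // one visible cell holding 2 ore
	for robot := 0; robot < 3; robot++ {
		valid := ores[0] > 0
		ores[0]--
		fmt.Printf("robot %d keeps dig: %v\n", robot, valid)
	}
	// robots 0 and 1 keep digging; robot 2 must be re-assigned
}
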
index.go
package main import ( "fmt" "strconv" "strings" "time" ) import "os" import "bufio" import "math" const RADAR_DIST = 4 const MOVE_DIST = 4 const UNKNOWN_THRESHOLD = 0.40 const COOLDOwN_RADAR = 5 const COOLDOwN_TRAP = 5 /********************************************************************************** * Functions that the std library doesn't have *********************************************************************************/ func abs(n int) int { if n < 0 { return -n } return n } func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b } func clamp(x, low, high int) int { return min(max(x, low), high) } /********************************************************************************** * Data structures *********************************************************************************/ type World struct { width, height int } var world World func (w World) ArrayIndex(x, y int) int { return y * w.width + x } func (w World) ArrayIndexC(coord Coord) int { return coord.y * w.width + coord.x } func (w World) Center() Coord { return Coord{w.width / 2, w.height / 2} } func (w World) Size() int { return w.width * w.height } /** * A pair of ints for coordinates **/ type Coord struct { x, y int } func (c Coord) String() string { return fmt.Sprintf("(%d, %d)", c.x, c.y) } type Cmd int const ( CMD_WAIT Cmd = 0 CMD_MOVE Cmd = 1 CMD_DIG Cmd = 2 CMD_RADAR Cmd = 3 CMD_TRAP Cmd = 4 ) type Item int const ( ITEM_NONE Item = -1 ITEM_RADAR Item = 2 ITEM_TRAP Item = 3 ITEM_ORE Item = 4 ) type Object int const ( OBJ_ME Object = 0 OBJ_OPPONENT Object = 1 OBJ_RADAR Object = 2 OBJ_TRAP Object = 3 ) type Robot struct { id int pos Coord cmd Cmd targetPos Coord item Item digIntent Item } func (r Robot) String() string { return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item) } func (r *Robot) Wait() { r.cmd = CMD_WAIT } func (r *Robot) MoveTo(pos Coord) { r.cmd = CMD_MOVE r.targetPos.x = pos.x r.targetPos.y = pos.y } func (r *Robot) Move(dx, dy int) { r.cmd = CMD_MOVE r.targetPos.x = clamp(r.pos.x + dx, 0, world.width) r.targetPos.y = clamp(r.pos.y + dy, 0, world.height) } func (r *Robot) ReturnToHQ() { r.cmd = CMD_MOVE r.targetPos.x = 0 r.targetPos.y = r.pos.y } func (r Robot) IsAtHQ() bool { return r.pos.x == 0 } func (r *Robot) Dig(pos Coord, intent Item) { r.cmd = CMD_DIG r.targetPos.x = pos.x r.targetPos.y = pos.y r.digIntent = intent } func (r *Robot) RequestRadar() { r.cmd = CMD_RADAR } func (r *Robot) RequestTrap() { r.cmd = CMD_TRAP } func (r Robot) GetCommand() string { if r.cmd == CMD_WAIT { return "WAIT" } if r.cmd == CMD_MOVE { return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_DIG { return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y) } if r.cmd == CMD_RADAR { return "REQUEST RADAR" } if r.cmd == CMD_TRAP { return "REQUEST TRAP" } fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d", r.cmd, r.id) return "WAIT" } func (r Robot) IsCmdValid(ores *[]int) (valid bool) { if r.cmd == CMD_DIG { if r.digIntent == ITEM_RADAR { return r.item == ITEM_RADAR } if r.digIntent == ITEM_TRAP { return r.item == ITEM_TRAP } if r.digIntent == ITEM_ORE { // Can only have 1 ore (for now?) 
if r.item == ITEM_ORE { return false } valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0 (*ores)[world.ArrayIndexC(r.targetPos)]-- // If it goes negative, that's okay return } } if r.cmd == CMD_MOVE { return r.pos != r.targetPos } if r.cmd == CMD_RADAR { return r.item != ITEM_RADAR } if r.cmd == CMD_TRAP { return r.item != ITEM_TRAP } return false } func (r Robot) IsDead() bool { return r.pos.x == -1 } /********************************************************************************** * Utility functions *********************************************************************************/ /** * The Manhattan distance between 2 coordinates **/ func dist(p1, p2 Coord) int { return abs(p1.x-p2.x) + abs(p1.y-p2.y) } /** * The Manhattan distance between 2 coordinates for digging (1 less) **/ func digDist(p1, p2 Coord) int { return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0) } /** * The distance in turns between 2 coordinates **/ func turnDist(p1, p2 Coord) int { return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST)) } /** * The distance in turns between 2 coordinates for digging **/ func digTurnDist(p1, p2 Coord) int { return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST)) } /********************************************************************************** * Serious business here *********************************************************************************/ func calculateCellRadarValues(unknowns []int) []int { radarValues := make([]int, world.Size()) for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { cell := Coord{i, j} for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ { for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ { if dist(cell, Coord{m, n}) > RADAR_DIST { continue } radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m,n)] } } } } return radarValues } func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) { radarValues := calculateCellRadarValues(unknowns) closest := world.width // furthest point largestValue := 0 // lowest value for j := 0; j < world.height; j++ { for i := 1; i < world.width; i++ { value := radarValues[world.ArrayIndex(i, j)] if value > largestValue { largestValue = value best = Coord{i, j} closest = i } else if value == largestValue { newCoord := Coord{i, j} // Pick the closest to HQ if i < closest { best = newCoord closest = i } } } } return best } /********************************************************************************** * Parsing Logic *********************************************************************************/ func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) { scanner.Scan() fmt.Sscan(scanner.Text(), &myScore, &opponentScore) return myScore, opponentScore } func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns* []int) (numOres, numUnknowns int){ numOres = 0 numUnknowns = 0 for j := 0; j < world.height; j++ { scanner.Scan() inputs := strings.Split(scanner.Text(), " ") for i := 0; i < world.width; i++ { // ore: amount of ore or "?" 
if unknown // hole: 1 if cell has a hole ore, err := strconv.Atoi(inputs[2*i]) if err != nil { (*ores)[world.ArrayIndex(i,j)] = 0 (*unknowns)[world.ArrayIndex(i,j)] = 1 numUnknowns++ } else { (*ores)[world.ArrayIndex(i,j)] = ore (*unknowns)[world.ArrayIndex(i,j)] = 0 numOres += ore } hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32) _ = hole } } return numOres, numUnknowns } func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int){ // entityCount: number of entities visible to you // radarCooldown: turns left until a new radar can be requested // trapCooldown: turns left until a new trap can be requested var entityCount int scanner.Scan() fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown) myRobot_i := 0 for i := 0; i < entityCount; i++ { // id: unique id of the entity // type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap // y: position of the entity // item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE) var id, objType, x, y, item int scanner.Scan() fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item) if Object(objType) == OBJ_ME {
robot := &(*robots)[myRobot_i] robot.id = id robot.pos.x = x robot.pos.y = y robot.item = Item(item) myRobot_i++ } else if Object(objType) == OBJ_TRAP { (*ores)[world.ArrayIndex(x,y)] = 0 } } return radarCooldown, trapCooldown } /********************************************************************************** * Main loop *********************************************************************************/ func main() { scanner := bufio.NewScanner(os.Stdin) scanner.Buffer(make([]byte, 1000000), 1000000) // height: size of the map var width, height int scanner.Scan() fmt.Sscan(scanner.Text(), &width, &height) world = World{width, height} ores := make([]int, width*height) unknowns := make([]int, width*height) robots := make([]Robot, 5) for { // Keep timing of each turn start := time.Now() // Parse input myScore, opponentScore := ParseScore(scanner) numOre, numUnknowns := ParseWorld(scanner, &ores, &unknowns) radarCooldown, trapCooldown := ParseEntities(scanner, &robots, &ores) _, _, _, _ = myScore, opponentScore, radarCooldown, trapCooldown percentUnknown := float64(numUnknowns) / float64(world.Size()) unknownThresholdPassed := percentUnknown < UNKNOWN_THRESHOLD var needCmds []int for i := 0; i < len(robots); i++ { robot := &robots[i] if robot.IsCmdValid(&ores) { continue } if robot.item == ITEM_ORE { robot.ReturnToHQ() } else if robot.IsAtHQ() { if robot.item == ITEM_RADAR { robot.Dig(calculateBestRadarPosition(unknowns, robot.pos), ITEM_RADAR) } else if robot.item == ITEM_TRAP { // nothing right now } else if radarCooldown == 0 && !unknownThresholdPassed { robot.RequestRadar() radarCooldown = COOLDOwN_RADAR //} else if trapCooldown == 0 { // calculate spot to place trap //trapCooldown = COOLDOwN_TRAP } else { needCmds = append(needCmds, i) } } else { needCmds = append(needCmds, i) } } if len(needCmds) > 0 { cmdIndex := 0 if numOre > 0 { for i := 0; i < width && numOre > 0 && cmdIndex < len(needCmds); i++ { for j := 0; j < height && numOre > 0 && cmdIndex < len(needCmds); j++ { cellOres := ores[world.ArrayIndex(i, j)] if cellOres != 0 { for k := 0; k < cellOres && cmdIndex < len(needCmds); k++ { robots[needCmds[cmdIndex]].Dig(Coord{i, j}, ITEM_ORE) cmdIndex++ numOre-- } } } } } else { robots[needCmds[cmdIndex]].ReturnToHQ() cmdIndex++ } for ; cmdIndex < len(needCmds); cmdIndex++ { robots[needCmds[cmdIndex]].MoveTo(world.Center()) } } for i := 0; i < len(robots); i++ { fmt.Println(robots[i].GetCommand()) // WAIT|MOVE x y|DIG x y|REQUEST item } elapsed := time.Since(start) fmt.Fprintf(os.Stderr, "%v elapsed in turn", elapsed) } }
random_line_split
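
The `random_line_split` record above includes `ParseWorld`, which relies on `strconv.Atoi` failing on the "?" tokens the game sends for cells outside radar range, folding the error path into the unknown-cell count instead of treating it as a real parse error. A self-contained Go sketch of that parsing idiom:

package main

import (
	"fmt"
	"strconv"
)

// Cells outside radar range arrive as "?", so a failed Atoi simply means
// "unknown" - the same trick ParseWorld uses to count numOres vs numUnknowns.
func main() {
	for _, tok := range []string{"0", "3", "?"} {
		if ore, err := strconv.Atoi(tok); err != nil {
			fmt.Println("unknown cell")
		} else {
			fmt.Println("ore:", ore)
		}
	}
}
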
index.go
package main import ( "fmt" "strconv" "strings" "time" ) import "os" import "bufio" import "math" const RADAR_DIST = 4 const MOVE_DIST = 4 const UNKNOWN_THRESHOLD = 0.40 const COOLDOwN_RADAR = 5 const COOLDOwN_TRAP = 5 /********************************************************************************** * Functions that the std library doesn't have *********************************************************************************/ func abs(n int) int { if n < 0 { return -n } return n } func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b } func clamp(x, low, high int) int { return min(max(x, low), high) } /********************************************************************************** * Data structures *********************************************************************************/ type World struct { width, height int } var world World func (w World) ArrayIndex(x, y int) int { return y * w.width + x } func (w World) ArrayIndexC(coord Coord) int { return coord.y * w.width + coord.x } func (w World) Center() Coord { return Coord{w.width / 2, w.height / 2} } func (w World) Size() int { return w.width * w.height } /** * A pair of ints for coordinates **/ type Coord struct { x, y int } func (c Coord) String() string { return fmt.Sprintf("(%d, %d)", c.x, c.y) } type Cmd int const ( CMD_WAIT Cmd = 0 CMD_MOVE Cmd = 1 CMD_DIG Cmd = 2 CMD_RADAR Cmd = 3 CMD_TRAP Cmd = 4 ) type Item int const ( ITEM_NONE Item = -1 ITEM_RADAR Item = 2 ITEM_TRAP Item = 3 ITEM_ORE Item = 4 ) type Object int const ( OBJ_ME Object = 0 OBJ_OPPONENT Object = 1 OBJ_RADAR Object = 2 OBJ_TRAP Object = 3 ) type Robot struct { id int pos Coord cmd Cmd targetPos Coord item Item digIntent Item } func (r Robot) String() string { return fmt.Sprintf("Robot (%d) { pos: %s, cmd: %d, targetPos: %s, item: %d}", r.id, r.pos, r.cmd, r.targetPos, r.item) } func (r *Robot) Wait() { r.cmd = CMD_WAIT } func (r *Robot) MoveTo(pos Coord) { r.cmd = CMD_MOVE r.targetPos.x = pos.x r.targetPos.y = pos.y } func (r *Robot) Move(dx, dy int) { r.cmd = CMD_MOVE r.targetPos.x = clamp(r.pos.x + dx, 0, world.width) r.targetPos.y = clamp(r.pos.y + dy, 0, world.height) } func (r *Robot) ReturnToHQ() { r.cmd = CMD_MOVE r.targetPos.x = 0 r.targetPos.y = r.pos.y } func (r Robot) IsAtHQ() bool { return r.pos.x == 0 } func (r *Robot) Dig(pos Coord, intent Item) { r.cmd = CMD_DIG r.targetPos.x = pos.x r.targetPos.y = pos.y r.digIntent = intent } func (r *Robot) RequestRadar() { r.cmd = CMD_RADAR } func (r *Robot) RequestTrap() { r.cmd = CMD_TRAP } func (r Robot) GetCommand() string { if r.cmd == CMD_WAIT { return "WAIT" } if r.cmd == CMD_MOVE
	if r.cmd == CMD_DIG {
		return fmt.Sprintf("DIG %d %d", r.targetPos.x, r.targetPos.y)
	}
	if r.cmd == CMD_RADAR {
		return "REQUEST RADAR"
	}
	if r.cmd == CMD_TRAP {
		return "REQUEST TRAP"
	}
	fmt.Fprintf(os.Stderr, "Unknown command type for robot! %d, id: %d\n", r.cmd, r.id)
	return "WAIT"
}

func (r Robot) IsCmdValid(ores *[]int) (valid bool) {
	if r.cmd == CMD_DIG {
		if r.digIntent == ITEM_RADAR {
			return r.item == ITEM_RADAR
		}
		if r.digIntent == ITEM_TRAP {
			return r.item == ITEM_TRAP
		}
		if r.digIntent == ITEM_ORE {
			// Can only have 1 ore (for now?)
			if r.item == ITEM_ORE {
				return false
			}
			valid = (*ores)[world.ArrayIndexC(r.targetPos)] > 0
			// Reserve one unit for this robot so later robots don't claim the
			// same vein this turn. If it goes negative, that's okay.
			// (a distilled sketch of this reservation pattern follows the listing)
			(*ores)[world.ArrayIndexC(r.targetPos)]--
			return
		}
	}
	if r.cmd == CMD_MOVE {
		return r.pos != r.targetPos
	}
	if r.cmd == CMD_RADAR {
		return r.item != ITEM_RADAR
	}
	if r.cmd == CMD_TRAP {
		return r.item != ITEM_TRAP
	}
	return false
}

func (r Robot) IsDead() bool {
	return r.pos.x == -1
}

/**********************************************************************************
 * Utility functions
 *********************************************************************************/

/**
 * The Manhattan distance between 2 coordinates
 **/
func dist(p1, p2 Coord) int {
	return abs(p1.x-p2.x) + abs(p1.y-p2.y)
}

/**
 * The Manhattan distance between 2 coordinates for digging (1 less, since a
 * robot digs an adjacent cell)
 **/
func digDist(p1, p2 Coord) int {
	return max(abs(p1.x-p2.x)+abs(p1.y-p2.y)-1, 0)
}

/**
 * The distance in turns between 2 coordinates
 **/
func turnDist(p1, p2 Coord) int {
	return int(math.Ceil(float64(dist(p1, p2)) / MOVE_DIST))
}

/**
 * The distance in turns between 2 coordinates for digging
 **/
func digTurnDist(p1, p2 Coord) int {
	return int(math.Ceil(float64(digDist(p1, p2)) / MOVE_DIST))
}

/**********************************************************************************
 * Serious business here
 *********************************************************************************/

func calculateCellRadarValues(unknowns []int) []int {
	radarValues := make([]int, world.Size())
	for j := 0; j < world.height; j++ {
		// start at x=1: column 0 is the HQ column
		for i := 1; i < world.width; i++ {
			cell := Coord{i, j}
			for n := max(j-RADAR_DIST, 0); n <= min(j+RADAR_DIST, world.height-1); n++ {
				for m := max(i-RADAR_DIST, 1); m <= min(i+RADAR_DIST, world.width-1); m++ {
					if dist(cell, Coord{m, n}) > RADAR_DIST {
						continue
					}
					radarValues[world.ArrayIndexC(cell)] += unknowns[world.ArrayIndex(m, n)]
				}
			}
		}
	}
	return radarValues
}

func calculateBestRadarPosition(unknowns []int, pos Coord) (best Coord) {
	radarValues := calculateCellRadarValues(unknowns)
	closest := world.width // furthest point
	largestValue := 0      // lowest value
	for j := 0; j < world.height; j++ {
		for i := 1; i < world.width; i++ {
			value := radarValues[world.ArrayIndex(i, j)]
			if value > largestValue {
				largestValue = value
				best = Coord{i, j}
				closest = i
			} else if value == largestValue {
				newCoord := Coord{i, j}
				// Pick the closest to HQ
				if i < closest {
					best = newCoord
					closest = i
				}
			}
		}
	}
	return best
}

/**********************************************************************************
 * Parsing Logic
 *********************************************************************************/

func ParseScore(scanner *bufio.Scanner) (myScore, opponentScore int) {
	scanner.Scan()
	fmt.Sscan(scanner.Text(), &myScore, &opponentScore)
	return myScore, opponentScore
}

func ParseWorld(scanner *bufio.Scanner, ores *[]int, unknowns *[]int) (numOres, numUnknowns int) {
	numOres = 0
	numUnknowns = 0
	for j := 0; j < world.height; j++ {
		scanner.Scan()
		inputs := strings.Split(scanner.Text(), " ")
		for i := 0; i < world.width; i++ {
			// ore: amount of ore or "?" if unknown
			// hole: 1 if cell has a hole
			ore, err := strconv.Atoi(inputs[2*i])
			if err != nil {
				(*ores)[world.ArrayIndex(i, j)] = 0
				(*unknowns)[world.ArrayIndex(i, j)] = 1
				numUnknowns++
			} else {
				(*ores)[world.ArrayIndex(i, j)] = ore
				(*unknowns)[world.ArrayIndex(i, j)] = 0
				numOres += ore
			}
			// the hole flag is parsed but currently unused
			hole, _ := strconv.ParseInt(inputs[2*i+1], 10, 32)
			_ = hole
		}
	}
	return numOres, numUnknowns
}

func ParseEntities(scanner *bufio.Scanner, robots *[]Robot, ores *[]int) (radarCooldown, trapCooldown int) {
	// entityCount: number of entities visible to you
	// radarCooldown: turns left until a new radar can be requested
	// trapCooldown: turns left until a new trap can be requested
	var entityCount int
	scanner.Scan()
	fmt.Sscan(scanner.Text(), &entityCount, &radarCooldown, &trapCooldown)
	myRobot_i := 0
	for i := 0; i < entityCount; i++ {
		// id: unique id of the entity
		// type: 0 for your robot, 1 for other robot, 2 for radar, 3 for trap
		// x, y: position of the entity
		// item: if this entity is a robot, the item it is carrying (-1 for NONE, 2 for RADAR, 3 for TRAP, 4 for ORE)
		var id, objType, x, y, item int
		scanner.Scan()
		fmt.Sscan(scanner.Text(), &id, &objType, &x, &y, &item)
		if Object(objType) == OBJ_ME {
			robot := &(*robots)[myRobot_i]
			robot.id = id
			robot.pos.x = x
			robot.pos.y = y
			robot.item = Item(item)
			myRobot_i++
		} else if Object(objType) == OBJ_TRAP {
			(*ores)[world.ArrayIndex(x, y)] = 0
		}
	}
	return radarCooldown, trapCooldown
}

/**********************************************************************************
 * Main loop
 *********************************************************************************/

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	scanner.Buffer(make([]byte, 1000000), 1000000)

	// width, height: size of the map
	var width, height int
	scanner.Scan()
	fmt.Sscan(scanner.Text(), &width, &height)
	world = World{width, height}

	ores := make([]int, width*height)
	unknowns := make([]int, width*height)
	robots := make([]Robot, 5)

	for {
		// Keep timing of each turn
		start := time.Now()

		// Parse input
		myScore, opponentScore := ParseScore(scanner)
		numOre, numUnknowns := ParseWorld(scanner, &ores, &unknowns)
		radarCooldown, trapCooldown := ParseEntities(scanner, &robots, &ores)
		_, _, _, _ = myScore, opponentScore, radarCooldown, trapCooldown

		percentUnknown := float64(numUnknowns) / float64(world.Size())
		unknownThresholdPassed := percentUnknown < UNKNOWN_THRESHOLD

		var needCmds []int
		for i := 0; i < len(robots); i++ {
			robot := &robots[i]
			if robot.IsCmdValid(&ores) {
				continue
			}
			if robot.item == ITEM_ORE {
				robot.ReturnToHQ()
			} else if robot.IsAtHQ() {
				if robot.item == ITEM_RADAR {
					robot.Dig(calculateBestRadarPosition(unknowns, robot.pos), ITEM_RADAR)
				} else if robot.item == ITEM_TRAP {
					// nothing right now
				} else if radarCooldown == 0 && !unknownThresholdPassed {
					robot.RequestRadar()
					radarCooldown = COOLDOWN_RADAR
					//} else if trapCooldown == 0 {
					//	calculate spot to place trap
					//	trapCooldown = COOLDOWN_TRAP
				} else {
					needCmds = append(needCmds, i)
				}
			} else {
				needCmds = append(needCmds, i)
			}
		}

		if len(needCmds) > 0 {
			cmdIndex := 0
			if numOre > 0 {
				for i := 0; i < width && numOre > 0 && cmdIndex < len(needCmds); i++ {
					for j := 0; j < height && numOre > 0 && cmdIndex < len(needCmds); j++ {
						cellOres := ores[world.ArrayIndex(i, j)]
						if cellOres != 0 {
							for k := 0; k < cellOres && cmdIndex < len(needCmds); k++ {
								robots[needCmds[cmdIndex]].Dig(Coord{i, j}, ITEM_ORE)
								cmdIndex++
								numOre--
							}
						}
					}
				}
			} else {
				robots[needCmds[cmdIndex]].ReturnToHQ()
				cmdIndex++
			}
			for ; cmdIndex < len(needCmds); cmdIndex++ {
				robots[needCmds[cmdIndex]].MoveTo(world.Center())
			}
		}

		for i := 0; i < len(robots); i++ {
			fmt.Println(robots[i].GetCommand()) // WAIT|MOVE x y|DIG x y|REQUEST item
		}

		elapsed := time.Since(start)
		fmt.Fprintf(os.Stderr, "%v elapsed in turn\n", elapsed)
	}
}
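IsCmdValid above doubles as a per-turn reservation step: it decrements the shared ore count at a robot's dig target so later robots in the same pass don't all claim one vein. A minimal, self-contained sketch of that pattern (the names here are illustrative, not from the bot):

package main

import "fmt"

// reserveOre claims one unit of ore at cell idx for a robot. It returns true
// only while ore is still believed to be there, and decrements the count
// either way so the same vein is not over-committed within a single turn.
// Going negative is harmless: it just means the cell was over-claimed.
func reserveOre(ores []int, idx int) bool {
	ok := ores[idx] > 0
	ores[idx]--
	return ok
}

func main() {
	ores := []int{2} // one cell believed to hold 2 ore
	for robot := 0; robot < 3; robot++ {
		fmt.Printf("robot %d keeps its dig: %v\n", robot, reserveOre(ores, 0))
	}
	// Prints true, true, false: the third robot must be re-tasked.
}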
{ return fmt.Sprintf("MOVE %d %d", r.targetPos.x, r.targetPos.y) }
conditional_block
main.rs
// The main struggles in this section were problems 11 and 18, and to some
// extent 12 and 14. 17 was annoying to debug, but not hard.
extern crate timings_proc_macro;
use timings_proc_macro::timings;

#[timings]
fn
() {
    let s: Vec<usize> = std::fs::read_to_string("src/e11.txt")
        .unwrap()
        .split_whitespace()
        .map(|n| n.parse::<usize>().unwrap())
        .collect();
    //println!("{:?}", s);
    // could just run with s, but let's build our 2d array.
    let mut v = [[0; 20]; 20];
    (0..400).for_each(|i| v[i / 20][i % 20] = s[i]);
    //println!("{:?}", v);
    let mut big = 0;
    use itertools::Itertools;
    (0..20).cartesian_product(0..20).for_each(|(i, j)| {
        if i < 17 {
            // h_
            let temp = v[i][j] * v[i + 1][j] * v[i + 2][j] * v[i + 3][j];
            if temp > big {
                // println!(
                //     "h_ new biggest: {} starting at: ({},{}), with init value {}:",
                //     big, i, j, v[i][j]
                // );
                big = temp
            }
        }
        if j < 17 {
            // v|
            let temp = v[i][j] * v[i][j + 1] * v[i][j + 2] * v[i][j + 3];
            if temp > big {
                // println!(
                //     "v| new biggest: {} starting at: ({},{}), with init value {}:",
                //     big, i, j, v[i][j]
                // );
                big = temp
            }
        }
        if i < 17 && j < 17 {
            // d\
            let temp = v[i][j] * v[i + 1][j + 1] * v[i + 2][j + 2] * v[i + 3][j + 3];
            if temp > big {
                // println!(
                //     "d\\ new biggest: {} starting at: ({},{}), with init value {}:",
                //     big, i, j, v[i][j]
                // );
                big = temp
            }
        }
        if i < 17 && j > 2 {
            // d/
            let temp = v[i][j] * v[i + 1][j - 1] * v[i + 2][j - 2] * v[i + 3][j - 3];
            if temp > big {
                // println!(
                //     "d/ new biggest: {} starting at: ({},{}), with init value {}:",
                //     big, i, j, v[i][j]
                // );
                big = temp
            }
        }
    });
    println!("biggest: {}", big);
}
// Very elegant: https://github.com/zacharydenton/euler/blob/master/011/grid.rs
// 1. include_str!("grid.txt") - I could be using this macro instead.
// 2. .filter_map(|n| n.parse().ok()), well isn't that sweet.
// 3. his solution collects the maximum value in each direction in an interesting
//    way: each element of a product line sits a fixed offset k ahead of the
//    current element in the flattened 20x20 grid - h: 1, v: 20, d\: 21, d/: 19.
//    This fails if the line crosses a row boundary, though.

// What is the value of the first triangle number to have over five hundred divisors?
#[timings]
fn e12() {
    // The entire problem is "count divisors". The naive solution is too slow; derive a formula.
    // Proposition: given X = p_1^a * p_2^b * ..., N_factors(X) = (a+1)(b+1)...
    // (see the standalone check after this listing)
    // Now we only need to find the algebraic multiplicity of each prime divisor.
    let multiplicities = |input: usize| -> std::collections::HashMap<usize, usize> {
        let mut h = std::collections::HashMap::new();
        let mut n = input;
        while n % 2 == 0 {
            let counter = h.entry(2).or_insert(0);
            *counter += 1;
            n /= 2;
        }
        let mut i = 3;
        while n > 1 {
            while n % i == 0 {
                let counter = h.entry(i).or_insert(0);
                *counter += 1;
                n /= i;
            }
            i += 2;
        }
        h
    };
    let mut i = 1;
    let mut sum = 0;
    loop {
        sum += i;
        i += 1;
        let divisors = multiplicities(sum).values().fold(1, |acc, d| acc * (1 + d));
        //dbg!(sum, divisors);
        if divisors > 500 {
            // i was already incremented past the current triangle index
            println!("value: {}, the {}th triangle number", sum, i - 1);
            break;
        }
    }
}

#[timings]
fn e13() {
    let s: Vec<String> = std::fs::read_to_string("src/e13.txt")
        .unwrap()
        .split_whitespace()
        .map(|s| s.parse::<String>().unwrap())
        .collect();
    // only the leading digits of each 50-digit number can affect the first 10 digits of the sum
    let s13: Vec<usize> = s
        .iter()
        .map(|l| l[..13].parse::<usize>().unwrap())
        .collect();
    let n = s13.iter().sum::<usize>().to_string();
    println!("e13: {}", &n[..10]);
}

#[allow(dead_code)]
fn collatz(n: usize) -> usize {
    match n % 2 {
        0 => n / 2,
        1 => 3 * n + 1,
        _ => unreachable!(),
    }
}

#[timings]
fn e14() {
    use std::collections::HashMap;
    let mut h = HashMap::new();
    h.insert(1, 0);
    let mut it_counter = 0;
    let mut biggest = (0, 0);
    for it in 2..1_000_000 {
        if h.contains_key(&it) {
            continue;
        }
        // Build a cache of values til we find a value we have seen
        let mut next = collatz(it);
        it_counter += 1;
        let mut cache: Vec<(usize, usize)> = vec![(it, it_counter)]; // e.g. it=2 enters as (2, 1)
        while h.get(&next).is_none() {
            it_counter += 1;
            cache.push((next, it_counter));
            next = collatz(next);
        }
        // the next value is now in the hashmap
        let count_last = *h.get(&next).unwrap();
        let count_for_it = count_last + it_counter;
        //println!("it:{},count: {}", it, count_for_it);
        for (n, c) in cache {
            let count = count_for_it + 1 - c;
            //println!("n:{},c: {}, count: {}", n, c, count);
            h.insert(n, count);
        }
        it_counter = 0;
        if count_for_it > biggest.0 {
            biggest = (count_for_it, it);
        }
    }
    println!("biggest seq len: {:?}, for n={:?}", biggest.0, biggest.1);
}

#[timings]
//https://github.com/zacharydenton/euler/blob/master/014/collatz.rs
fn e14_zach_denton() {
    let mut collatz: Vec<usize> = vec![0; 1_000_000];
    collatz[1] = 1;
    let max = (2..collatz.len())
        .max_by_key(|&i| {
            let f = |n: usize| match n % 2 {
                0 => n / 2,
                _ => n * 3 + 1,
            };
            // walk the sequence, counting steps, until we reach a cached value
            let (mut j, mut len) = (i, 0);
            loop {
                // stop once j is inside the table and already computed
                if j < collatz.len() && collatz[j] != 0 {
                    break;
                }
                len += 1;
                j = f(j);
            }
            len += collatz[j];
            collatz[i] = len;
            len
        })
        .unwrap();
    println!("{}", max);
}

// How many such (only move right or down) routes are there through a 20×20 grid?
#[timings]
fn e15() {
    // basic combinatorics: of 40 moves, choose 20 to be "down" - i.e. the
    // central binomial coefficient C(40, 20) = 40!/(20! * 20!).
    let a: u128 = (21..=40).product();
    let b: u128 = (2..=20).product();
    println!("{}", a / b);
}

#[timings]
fn e16() {
    // mostly, futzing with bigint.
    use num_bigint::BigUint;
    // note that 2**1000 has about 300 digits, so it can't fit into a normal integer representation. Need a bigint.
    let a = BigUint::new(vec![2]);
    let b = a.pow(1000);
    //println!("{:?}", b);
    // The following are equivalent:
    //let res = b.to_string().chars().fold(0, |a, d| a + d.to_digit(10).unwrap());
    let res: u32 = b.to_string().chars().map(|c| c.to_digit(10).unwrap()).sum();
    println!("{:?}", res);
    //let digits: num::BigInt = 2.pow(1000);
}

// If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
#[timings]
fn e17() {
    // letter counts for the words we need: units, teens, and tens
    let map = vec![
        (0, 0), (1, 3), (2, 3), (3, 5), (4, 4), (5, 4), (6, 3), (7, 5), (8, 5), (9, 4),
        (10, 3), (11, 6), (12, 6), (13, 8), (14, 8), (15, 7), (16, 7), (17, 9), (18, 8), (19, 8),
        (20, 6), (30, 6), (40, 5), (50, 5), (60, 5), (70, 7), (80, 6), (90, 6),
    ];
    let h = std::collections::HashMap::from_iter(map.into_iter());
    let res: usize = (1..=1000).fold(0, |acc, x| acc + count_letters(x, &h));
    println!("{}", res);
}

fn count_letters(d: usize, h: &std::collections::HashMap<usize, usize>) -> usize {
    let (a, b, c, e) = (d % 10, d / 10 % 10, d / 100 % 10, d / 1000 % 10);
    let aa = if b == 1 { 0 } else { *h.get(&a).unwrap() };
    let bb = if b == 1 {
        *h.get(&(b * 10 + a)).unwrap()
    } else {
        *h.get(&(b * 10)).unwrap()
    };
    // "and" counts, apparently: 3 ("and") + 7 ("hundred")
    let mut cc = if c > 0 { 3 + 7 + h.get(&c).unwrap() } else { 0 };
    if c > 0 && aa == 0 && bb == 0 {
        cc -= 3 // a round hundred doesn't have an "and"
    };
    let ee = if e > 0 { 8 + h.get(&e).unwrap() } else { 0 }; // 8 for "thousand"
    //println!("{}:{},{},{},{}", d, ee, cc, bb, aa);
    aa + bb + cc + ee
}

// first problem to be a bit of a challenge. I struggled picking a data structure and strategy for this one.
// A couple of possible approaches occur:
//   naive: at each step, pick the greatest next value
//   brute: calculate the value of all 2^14 paths, not hard
//   pruning: similar to brute, but if some sufficiently low sequence is included, exit early
//     (optimization parameters: how often to prune, and what "sufficiently low" means)
// This problem begs to be solved recursively somehow.
#[timings]
fn e18() {
    let triangle: Vec<Vec<usize>> = std::fs::read_to_string("src/e18.txt")
        .unwrap()
        .lines()
        .map(|l| {
            l.split_whitespace()
                .map(|n| n.parse::<usize>().unwrap())
                .collect::<Vec<usize>>()
        })
        .collect();
    let res = e18_less_naive_r(&triangle[1..], 75, 0);
    println!("{}", res);
}

/// traverse the triangle picking the greatest value at the next binary choice
#[allow(dead_code)]
fn e18_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
    if t.is_empty() {
        running_sum
    } else {
        let (rs, li) = if t[0][last_index] > t[0][last_index + 1] {
            (t[0][last_index], last_index)
        } else {
            (t[0][last_index + 1], last_index + 1)
        };
        println!("append:{},{}", rs, li);
        e18_naive_r(&t[1..], running_sum + rs, li)
    }
}

// 18 minutes for the naive try. Now let's try a little harder: something with look-ahead.
const PEEK_DIST: usize = 5;

/// traverse the triangle, picking the greatest PEEK_DIST-step chain at each binary choice
fn e18_less_naive_r(t: &[Vec<usize>], running_sum: usize, last_index: usize) -> usize {
    if t.is_empty() {
        running_sum
    } else {
        // need to peek here
        let (_, dir, _path) = peek_ahead_r(t, running_sum, last_index, PEEK_DIST, None, vec![]);
        let (val, ind) = match dir {
            Dir::Left => (t[0][last_index], last_index),
            Dir::Right => (t[0][last_index + 1], last_index + 1),
        };
        //println!("append val:{}, ind:{}, path:{:?}", val, ind, _path);
        e18_less_naive_r(&t[1..], running_sum + val, ind)
    }
}

// when looking ahead a single step, terminate, returning (running_sum, Left|Right)
#[derive(Clone, Debug)]
enum Dir {
    Left,
    Right,
}

fn peek_ahead_r(
    t: &[Vec<usize>],
    running_sum: usize,
    last_index: usize,
    mut peek_dist: usize,
    first_step: Option<Dir>,
    /* debugging */ mut path: Vec<(usize, usize)>,
) -> (usize /* value */, Dir, Vec<(usize, usize)>) {
    if peek_dist > t.len() {
        peek_dist = t.len()
    }
    assert!(peek_dist > 0);
    if peek_dist == 1 {
        // if tie: prefer rightward motion, THIS IS A (temporarily acceptable) BUG
        if t[0][last_index] > t[0][last_index + 1] {
            path.push((t[0][last_index], last_index));
            (
                t[0][last_index] + running_sum,
                first_step.unwrap_or(Dir::Left),
                path,
            )
        } else {
            path.push((t[0][last_index + 1], last_index + 1));
            (
                t[0][last_index + 1] + running_sum,
                first_step.unwrap_or(Dir::Right),
                path,
            )
        }
    } else {
        let mut p_left = path.clone();
        p_left.push((t[0][last_index], last_index));
        let left = peek_ahead_r(
            &t[1..],
            running_sum + t[0][last_index],
            last_index,
            peek_dist - 1,
            first_step.clone().unwrap_or(Dir::Left).into(),
            p_left,
        );
        let mut p_right = path.clone();
        p_right.push((t[0][last_index + 1], last_index + 1));
        let right = peek_ahead_r(
            &t[1..],
            running_sum + t[0][last_index + 1],
            last_index + 1,
            peek_dist - 1,
            first_step.unwrap_or(Dir::Right).into(),
            p_right,
        );
        if left.0 > right.0 {
            left
        } else {
            right
        }
    }
}

// How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
#[timings]
fn e19() {
    // Sundays are (near-)uniformly distributed, so P(a first falls on Sunday) ≈ 1/7.
    // There were 12*100 firsts of the month; 1200/7 ≈ 171.4, and integer division
    // floors that to 171, which happens to be the exact count.
    println!("{}", 12 * 100 / 7);
}

// Can't win em all. But when ya do~
#[timings]
fn e20() {
    // Find the sum of the digits in the number 100!
    // Would expect the magnitude to be roughly 50^100, which has about 150 digits
    // (100! actually has 158, with 24 trailing zeroes). It seems best to just shove
    // it in a bigint: 97 multiplications (2..99). Some theorem may exist about the
    // sum of digits of a product of two numbers, could search for it. Meh, thought
    // for 5 minutes, do the bigint thing.
    use num_bigint::BigUint;
    // 100! is far too large for a normal integer representation. Need a bigint.
    let a = BigUint::new(vec![2]);
    // (2..=99).product() gives 99!; the final *100 only appends two zeroes,
    // which don't affect the digit sum.
    let a = (3..=99).fold(a, |acc, i| acc * (i as u32));
    let res = a
        .to_string()
        .chars()
        .fold(0, |acc, i| acc + i.to_digit(10).unwrap());
    println!("{:?}", res);
}

fn main() {
    e11();
    e12();
    e13();
    //e14();
    e14_zach_denton();
    e15();
    e16();
    e17();
    e18();
    e19();
    e20();
}
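The divisor-count proposition behind e12 deserves one concrete check. Each divisor of X = p_1^a * p_2^b * ... independently picks an exponent between 0 and a for p_1, between 0 and b for p_2, and so on, giving (a+1)(b+1)... combinations. A standalone Rust sketch (not part of main.rs; the function name is illustrative) verifying this on 28 = 2^2 * 7:

// Worked check of the e12 proposition: d(p1^a * p2^b * ...) = (a+1)(b+1)...
// 28 = 2^2 * 7^1, so d(28) = (2+1)(1+1) = 6: {1, 2, 4, 7, 14, 28}.
fn divisor_count(mut n: usize) -> usize {
    let mut count = 1;
    let mut p = 2;
    while p * p <= n {
        let mut a = 0; // multiplicity of prime p in n
        while n % p == 0 {
            a += 1;
            n /= p;
        }
        count *= a + 1;
        p += 1;
    }
    if n > 1 {
        count *= 2; // one leftover prime factor with multiplicity 1
    }
    count
}

fn main() {
    assert_eq!(divisor_count(28), 6);
    println!("d(28) = {}", divisor_count(28));
}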
e11
identifier_name